V8 Project
v8::internal::maglev::MaglevGraphBuilder Class Reference

#include <maglev-graph-builder.h>

+ Collaboration diagram for v8::internal::maglev::MaglevGraphBuilder:

Classes

class  BranchBuilder
 
struct  ContinuationOffsets
 
class  DeoptFrameScope
 
struct  ForInState
 
struct  HandlerTableEntry
 
class  LazyDeoptResultLocationScope
 
class  MaglevSubGraphBuilder
 
class  SaveCallSpeculationScope
 

Public Member Functions

 MaglevGraphBuilder (LocalIsolate *local_isolate, MaglevCompilationUnit *compilation_unit, Graph *graph, MaglevCallerDetails *caller_details=nullptr)
 
void Build ()
 
ReduceResult BuildInlineFunction (SourcePosition call_site_position, ValueNode *context, ValueNode *function, ValueNode *new_target)
 
void StartPrologue ()
 
void SetArgument (int i, ValueNode *value)
 
void InitializeRegister (interpreter::Register reg, ValueNode *value)
 
ValueNode * GetArgument (int i)
 
ValueNode * GetInlinedArgument (int i)
 
void BuildRegisterFrameInitialization (ValueNode *context=nullptr, ValueNode *closure=nullptr, ValueNode *new_target=nullptr)
 
void BuildMergeStates ()
 
BasicBlock * EndPrologue ()
 
void PeelLoop ()
 
void BuildLoopForPeeling ()
 
void OsrAnalyzePrequel ()
 
void BuildBody ()
 
SmiConstant * GetSmiConstant (int constant) const
 
TaggedIndexConstant * GetTaggedIndexConstant (int constant)
 
Int32Constant * GetInt32Constant (int32_t constant)
 
IntPtrConstant * GetIntPtrConstant (intptr_t constant)
 
Uint32Constant * GetUint32Constant (int constant)
 
Float64Constant * GetFloat64Constant (double constant)
 
Float64Constant * GetFloat64Constant (Float64 constant)
 
RootConstant * GetRootConstant (RootIndex index)
 
RootConstant * GetBooleanConstant (bool value)
 
ValueNode * GetConstant (compiler::ObjectRef ref)
 
ValueNode * GetTrustedConstant (compiler::HeapObjectRef ref, IndirectPointerTag tag)
 
ValueNode * GetNumberConstant (double constant)
 
Graph * graph () const
 
Zone * zone () const
 
MaglevCompilationUnit * compilation_unit () const
 
const InterpreterFrameState & current_interpreter_frame () const
 
MaglevCallerDetails * caller_details () const
 
const DeoptFrameScope * current_deopt_scope () const
 
compiler::JSHeapBroker * broker () const
 
LocalIsolate * local_isolate () const
 
bool has_graph_labeller () const
 
MaglevGraphLabeller * graph_labeller () const
 
bool is_inline () const
 
int inlining_depth () const
 
bool is_eager_inline () const
 
DeoptFrame GetLatestCheckpointedFrame ()
 
bool need_checkpointed_loop_entry ()
 
bool TopLevelFunctionPassMaglevPrintFilter ()
 
void RecordUseReprHint (Phi *phi, UseRepresentationSet reprs)
 
void RecordUseReprHint (Phi *phi, UseRepresentation repr)
 
void RecordUseReprHintIfPhi (ValueNode *node, UseRepresentation repr)
 
void set_current_block (BasicBlock *block)
 
BasicBlock * FinishInlinedBlockForCaller (ControlNode *control_node, ZoneVector< Node * > rem_nodes_in_call_block)
 
ZoneVector< Node * > & node_buffer ()
 
uint32_t NewObjectId ()
 
bool is_turbolev () const
 
bool is_non_eager_inlining_enabled () const
 
int max_inlined_bytecode_size ()
 
int max_inlined_bytecode_size_small ()
 
float min_inlining_frequency ()
 
int max_inlined_bytecode_size_cumulative ()
 
int max_inline_depth ()
 
DeoptFrame * AddInlinedArgumentsToDeoptFrame (DeoptFrame *deopt_frame, const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
 

Private Types

enum  ContextSlotMutability { kImmutable , kMutable }
 
enum class  StringAtOOBMode { kElement , kCharAt }
 
enum class  TrackObjectMode { kLoad , kStore }
 
enum  InferHasInPrototypeChainResult { kMayBeInPrototypeChain , kIsInPrototypeChain , kIsNotInPrototypeChain }
 
enum class  BranchType { kBranchIfTrue , kBranchIfFalse }
 
enum class  BranchSpecializationMode { kDefault , kAlwaysBoolean }
 
enum class  BranchResult { kDefault , kAlwaysTrue , kAlwaysFalse }
 
using InitialCallback = std::function< ReduceResult(ValueNode *)>
 
using ProcessElementCallback = std::function< void(ValueNode *, ValueNode *)>
 
using GetDeoptScopeCallback = std::function< DeoptFrameScope(compiler::JSFunctionRef, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *, ValueNode *)>
 
using TypeOfLiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag
 

Private Member Functions

bool CheckType (ValueNode *node, NodeType type, NodeType *old=nullptr)
 
NodeType CheckTypes (ValueNode *node, std::initializer_list< NodeType > types)
 
bool EnsureType (ValueNode *node, NodeType type, NodeType *old=nullptr)
 
template<typename Function >
bool EnsureType (ValueNode *node, NodeType type, Function ensure_new_type)
 
NodeType GetType (ValueNode *node)
 
NodeInfo * GetOrCreateInfoFor (ValueNode *node)
 
bool HaveDisjointTypes (ValueNode *lhs, ValueNode *rhs)
 
bool HasDisjointType (ValueNode *lhs, NodeType rhs_type)
 
void SetKnownValue (ValueNode *node, compiler::ObjectRef constant, NodeType new_node_type)
 
bool ShouldEmitInterruptBudgetChecks ()
 
bool ShouldEmitOsrInterruptBudgetChecks ()
 
bool MaglevIsTopTier () const
 
BasicBlock * CreateEdgeSplitBlock (BasicBlockRef &jump_targets, BasicBlock *predecessor)
 
void ProcessMergePointAtExceptionHandlerStart (int offset)
 
void ProcessMergePoint (int offset, bool preserve_known_node_aspects)
 
void ProcessMergePointPredecessors (MergePointInterpreterFrameState &merge_state, BasicBlockRef &jump_targets)
 
void RegisterPhisWithGraphLabeller (MergePointInterpreterFrameState &merge_state)
 
bool IsOffsetAMergePoint (int offset)
 
ValueNode * GetContextAtDepth (ValueNode *context, size_t depth)
 
bool CheckContextExtensions (size_t depth)
 
ReduceResult EmitUnconditionalDeopt (DeoptimizeReason reason)
 
void KillPeeledLoopTargets (int peelings)
 
void MarkBytecodeDead ()
 
void UpdateSourceAndBytecodePosition (int offset)
 
void PrintVirtualObjects ()
 
void VisitSingleBytecode ()
 
void AddInitializedNodeToGraph (Node *node)
 
template<typename NodeT , typename Function , typename... Args>
NodeT * AddNewNode (size_t input_count, Function &&post_create_input_initializer, Args &&... args)
 
template<typename NodeT , typename... Args>
NodeT * AddNewNodeOrGetEquivalent (std::initializer_list< ValueNode * > raw_inputs, Args &&... args)
 
template<typename NodeT , typename... Args>
NodeT * AddNewNode (std::initializer_list< ValueNode * > inputs, Args &&... args)
 
template<typename NodeT , typename... Args>
NodeT * CreateNewConstantNode (Args &&... args) const
 
template<typename NodeT >
NodeT * AttachExtraInfoAndAddToGraph (NodeT *node)
 
template<typename NodeT >
void AttachDeoptCheckpoint (NodeT *node)
 
template<typename NodeT >
void AttachEagerDeoptInfo (NodeT *node)
 
template<typename NodeT >
void AttachLazyDeoptInfo (NodeT *node)
 
template<typename NodeT >
void AttachExceptionHandlerInfo (NodeT *node)
 
bool IsInsideTryBlock () const
 
MergePointInterpreterFrameState * GetCatchBlockFrameState ()
 
CatchBlockDetails GetCurrentTryCatchBlock ()
 
CatchBlockDetails GetTryCatchBlockForNonEagerInlining (ExceptionHandlerInfo *info)
 
bool ContextMayAlias (ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
 
bool TrySpecializeLoadContextSlotToFunctionContext (ValueNode *context, int slot_index, ContextSlotMutability slot_mutability)
 
ValueNode * TrySpecializeLoadContextSlot (ValueNode *context, int index)
 
ValueNode * LoadAndCacheContextSlot (ValueNode *context, int offset, ContextSlotMutability slot_mutability, ContextMode context_mode)
 
MaybeReduceResult TrySpecializeStoreContextSlot (ValueNode *context, int index, ValueNode *value, Node **store)
 
ReduceResult StoreAndCacheContextSlot (ValueNode *context, int index, ValueNode *value, ContextMode context_mode)
 
ValueNode * TryGetParentContext (ValueNode *node)
 
void MinimizeContextChainDepth (ValueNode **context, size_t *depth)
 
void EscapeContext ()
 
void BuildLoadContextSlot (ValueNode *context, size_t depth, int slot_index, ContextSlotMutability slot_mutability, ContextMode context_mode)
 
ReduceResult BuildStoreContextSlot (ValueNode *context, size_t depth, int slot_index, ValueNode *value, ContextMode context_mode)
 
void BuildStoreMap (ValueNode *object, compiler::MapRef map, StoreMap::Kind kind)
 
ValueNode * BuildExtendPropertiesBackingStore (compiler::MapRef map, ValueNode *receiver, ValueNode *property_array)
 
template<Builtin kBuiltin>
CallBuiltin * BuildCallBuiltin (std::initializer_list< ValueNode * > inputs)
 
template<Builtin kBuiltin>
CallBuiltin * BuildCallBuiltin (std::initializer_list< ValueNode * > inputs, compiler::FeedbackSource const &feedback, CallBuiltin::FeedbackSlotType slot_type=CallBuiltin::kTaggedIndex)
 
CallCPPBuiltin * BuildCallCPPBuiltin (Builtin builtin, ValueNode *target, ValueNode *new_target, std::initializer_list< ValueNode * > inputs)
 
ReduceResult BuildLoadGlobal (compiler::NameRef name, compiler::FeedbackSource &feedback_source, TypeofMode typeof_mode)
 
ValueNode * BuildToString (ValueNode *value, ToString::ConversionMode mode)
 
constexpr bool RuntimeFunctionCanThrow (Runtime::FunctionId function_id)
 
ReduceResult BuildCallRuntime (Runtime::FunctionId function_id, std::initializer_list< ValueNode * > inputs)
 
ReduceResult BuildAbort (AbortReason reason)
 
void Print (const char *str)
 
void Print (ValueNode *value)
 
void Print (const char *str, ValueNode *value)
 
ValueNode * GetFeedbackCell ()
 
ValueNode * GetClosure () const
 
ValueNode * GetContext () const
 
void SetContext (ValueNode *context)
 
FeedbackSlot GetSlotOperand (int operand_index) const
 
uint32_t GetFlag8Operand (int operand_index) const
 
uint32_t GetFlag16Operand (int operand_index) const
 
template<class T >
compiler::ref_traits< T >::ref_type GetRefOperand (int operand_index) requires(is_taggable_v< T >)
 
MaybeReduceResult GetConstantSingleCharacterStringFromCode (uint16_t)
 
ValueNode * GetRegisterInput (Register reg)
 
void MoveNodeBetweenRegisters (interpreter::Register src, interpreter::Register dst)
 
ValueNode * GetTaggedValue (ValueNode *value, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
 
ReduceResult GetSmiValue (ValueNode *value, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
 
MaybeReduceResult GetSmiValue (interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
 
ValueNode * GetTaggedValue (interpreter::Register reg, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
 
ValueNode * GetInternalizedString (interpreter::Register reg)
 
ValueNode * GetTruncatedInt32ForToNumber (ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetTruncatedInt32ForToNumber (interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetUint8ClampedForToNumber (ValueNode *value)
 
ValueNode * GetUint8ClampedForToNumber (interpreter::Register reg)
 
std::optional< int32_t > TryGetInt32Constant (ValueNode *value)
 
std::optional< uint32_t > TryGetUint32Constant (ValueNode *value)
 
ValueNode * GetInt32 (ValueNode *value, bool can_be_heap_number=false)
 
void EnsureInt32 (ValueNode *value, bool can_be_heap_number=false)
 
void EnsureInt32 (interpreter::Register reg)
 
std::optional< double > TryGetFloat64Constant (ValueNode *value, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetFloat64 (ValueNode *value)
 
ValueNode * GetFloat64 (interpreter::Register reg)
 
ValueNode * GetHoleyFloat64 (ValueNode *value, bool convert_hole_to_undefined)
 
ValueNode * GetFloat64ForToNumber (ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetFloat64ForToNumber (interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetHoleyFloat64ForToNumber (ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetHoleyFloat64ForToNumber (interpreter::Register reg, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetAccumulator ()
 
MaybeReduceResult GetAccumulatorSmi (UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
 
ValueNode * GetAccumulatorTruncatedInt32ForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetAccumulatorUint8ClampedForToNumber ()
 
ValueNode * GetAccumulatorHoleyFloat64ForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ValueNode * GetSilencedNaN (ValueNode *value)
 
bool IsRegisterEqualToAccumulator (int operand_index)
 
ValueNode * LoadRegister (int operand_index)
 
ValueNode * LoadRegisterHoleyFloat64ForToNumber (int operand_index, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<typename NodeT >
void SetAccumulator (NodeT *node)
 
void ClobberAccumulator ()
 
ValueNode * GetSecondValue (ValueNode *result)
 
template<typename NodeT >
void StoreRegister (interpreter::Register target, NodeT *value)
 
void SetAccumulatorInBranch (ValueNode *value)
 
template<typename NodeT >
void StoreRegisterPair (std::pair< interpreter::Register, interpreter::Register > target, NodeT *value)
 
std::pair< interpreter::Register, int > GetResultLocationAndSize () const
 
DeoptFrame * GetCallerDeoptFrame ()
 
DeoptFrame * GetDeoptFrameForEagerCall (const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
 
DeoptFrame GetDeoptFrameForLazyDeopt (interpreter::Register result_location, int result_size)
 
DeoptFrame GetDeoptFrameForLazyDeoptHelper (interpreter::Register result_location, int result_size, DeoptFrameScope *scope, bool mark_accumulator_dead)
 
InterpretedDeoptFrame GetDeoptFrameForEntryStackCheck ()
 
template<typename NodeT >
void MarkPossibleSideEffect (NodeT *node)
 
template<bool is_possible_map_change = true>
void ResetBuilderCachedState ()
 
int next_offset () const
 
const compiler::BytecodeLivenessState * GetInLiveness () const
 
const compiler::BytecodeLivenessState * GetInLivenessFor (int offset) const
 
const compiler::BytecodeLivenessState * GetOutLiveness () const
 
const compiler::BytecodeLivenessState * GetOutLivenessFor (int offset) const
 
void StartNewBlock (int offset, BasicBlock *predecessor)
 
void StartNewBlock (BasicBlock *predecessor, MergePointInterpreterFrameState *merge_state, BasicBlockRef &refs_to_block)
 
template<UseReprHintRecording hint = UseReprHintRecording::kRecord>
ValueNode * ConvertInputTo (ValueNode *input, ValueRepresentation expected)
 
template<typename NodeT >
void SetNodeInputs (NodeT *node, std::initializer_list< ValueNode * > inputs)
 
void FlushNodesToBlock ()
 
template<typename ControlNodeT , typename... Args>
BasicBlock * FinishBlock (std::initializer_list< ValueNode * > control_inputs, Args &&... args)
 
void StartFallthroughBlock (int next_block_offset, BasicBlock *predecessor)
 
ValueNode * GetValueOrUndefined (ValueNode *maybe_value)
 
ValueNode * GetConvertReceiver (compiler::SharedFunctionInfoRef shared, const CallArguments &args)
 
base::Vector< ValueNode * > GetArgumentsAsArrayOfValueNodes (compiler::SharedFunctionInfoRef shared, const CallArguments &args)
 
compiler::OptionalHeapObjectRef TryGetConstant (ValueNode *node, ValueNode **constant_node=nullptr)
 
std::optional< ValueNode * > TryGetConstantAlternative (ValueNode *node)
 
template<typename LoadNode >
MaybeReduceResult TryBuildLoadDataView (const CallArguments &args, ExternalArrayType type)
 
template<typename StoreNode , typename Function >
MaybeReduceResult TryBuildStoreDataView (const CallArguments &args, ExternalArrayType type, Function &&getValue)
 
MaybeReduceResult TryReduceDatePrototypeGetField (compiler::JSFunctionRef target, CallArguments &args, JSDate::FieldIndex field)
 
MaybeReduceResult TryReduceArrayIteratingBuiltin (const char *name, compiler::JSFunctionRef target, CallArguments &args, GetDeoptScopeCallback get_eager_deopt_scope, GetDeoptScopeCallback get_lazy_deopt_scope, const std::optional< InitialCallback > &initial_callback={}, const std::optional< ProcessElementCallback > &process_element_callback={})
 
MaybeReduceResult TryReduceConstantStringAt (ValueNode *object, ValueNode *index, StringAtOOBMode oob_mode)
 
MaybeReduceResult TryReduceGetProto (ValueNode *node)
 
template<typename MapKindsT , typename IndexToElementsKindFunc , typename BuildKindSpecificFunc >
MaybeReduceResult BuildJSArrayBuiltinMapSwitchOnElementsKind (ValueNode *receiver, const MapKindsT &map_kinds, MaglevSubGraphBuilder &sub_graph, std::optional< MaglevSubGraphBuilder::Label > &do_return, int unique_kind_count, IndexToElementsKindFunc &&index_to_elements_kind, BuildKindSpecificFunc &&build_kind_specific)
 
MaybeReduceResult DoTryReduceMathRound (CallArguments &args, Float64Round::Kind kind)
 
template<typename CallNode , typename... Args>
CallNode * AddNewCallNode (const CallArguments &args, Args &&... extra_args)
 
MaybeReduceResult TryReduceGetIterator (ValueNode *receiver, int load_slot, int call_slot)
 
ValueNode * BuildCallSelf (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, CallArguments &args)
 
MaybeReduceResult TryReduceBuiltin (compiler::JSFunctionRef target, compiler::SharedFunctionInfoRef shared, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
bool TargetIsCurrentCompilingUnit (compiler::JSFunctionRef target)
 
CallKnownJSFunction * BuildCallKnownJSFunction (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
CallKnownJSFunction * BuildCallKnownJSFunction (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, base::Vector< ValueNode * > arguments)
 
MaybeReduceResult TryBuildCallKnownJSFunction (compiler::JSFunctionRef function, ValueNode *new_target, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
MaybeReduceResult TryBuildCallKnownJSFunction (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
bool CanInlineCall (compiler::SharedFunctionInfoRef shared, float call_frequency)
 
bool ShouldEagerInlineCall (compiler::SharedFunctionInfoRef shared)
 
ReduceResult BuildEagerInlineCall (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, float call_frequency)
 
MaybeReduceResult TryBuildInlineCall (ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
ValueNode * BuildGenericCall (ValueNode *target, Call::TargetType target_type, const CallArguments &args)
 
MaybeReduceResult TryReduceCallForConstant (compiler::JSFunctionRef target, CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
 
MaybeReduceResult TryReduceCallForTarget (ValueNode *target_node, compiler::JSFunctionRef target, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
MaybeReduceResult TryReduceCallForNewClosure (ValueNode *target_node, ValueNode *target_context, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
MaybeReduceResult TryBuildCallKnownApiFunction (compiler::JSFunctionRef function, compiler::SharedFunctionInfoRef shared, CallArguments &args)
 
compiler::HolderLookupResult TryInferApiHolderValue (compiler::FunctionTemplateInfoRef function_template_info, ValueNode *receiver)
 
MaybeReduceResult TryReduceCallForApiFunction (compiler::FunctionTemplateInfoRef api_callback, compiler::OptionalSharedFunctionInfoRef maybe_shared, CallArguments &args)
 
MaybeReduceResult TryReduceFunctionPrototypeApplyCallWithReceiver (compiler::OptionalHeapObjectRef maybe_receiver, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
ReduceResult ReduceCallWithArrayLikeForArgumentsObject (ValueNode *target_node, CallArguments &args, VirtualObject *arguments_object, const compiler::FeedbackSource &feedback_source)
 
ReduceResult ReduceCallWithArrayLike (ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
ReduceResult ReduceCall (ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
 
ReduceResult BuildCallWithFeedback (ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)
 
ReduceResult BuildCallFromRegisterList (ConvertReceiverMode receiver_mode)
 
ReduceResult BuildCallFromRegisters (int argc_count, ConvertReceiverMode receiver_mode)
 
ValueNode * BuildElementsArray (int length)
 
ReduceResult BuildAndAllocateKeyValueArray (ValueNode *key, ValueNode *value)
 
ReduceResult BuildAndAllocateJSArray (compiler::MapRef map, ValueNode *length, ValueNode *elements, const compiler::SlackTrackingPrediction &slack_tracking_prediction, AllocationType allocation_type)
 
ValueNode * BuildAndAllocateJSArrayIterator (ValueNode *array, IterationKind iteration_kind)
 
MaybeReduceResult TryBuildAndAllocateJSGeneratorObject (ValueNode *closure, ValueNode *receiver)
 
ValueNode * BuildGenericConstruct (ValueNode *target, ValueNode *new_target, ValueNode *context, const CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
 
MaybeReduceResult TryReduceConstructArrayConstructor (compiler::JSFunctionRef array_function, CallArguments &args, compiler::OptionalAllocationSiteRef maybe_allocation_site={})
 
MaybeReduceResult TryReduceConstructBuiltin (compiler::JSFunctionRef builtin, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, CallArguments &args)
 
MaybeReduceResult TryReduceConstructGeneric (compiler::JSFunctionRef function, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
 
MaybeReduceResult TryReduceConstruct (compiler::HeapObjectRef feedback_target, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
 
ReduceResult BuildConstruct (ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
 
MaybeReduceResult TryBuildScriptContextStore (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildPropertyCellStore (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildGlobalStore (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildScriptContextConstantLoad (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildScriptContextLoad (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildPropertyCellLoad (const compiler::GlobalAccessFeedback &global_access_feedback)
 
MaybeReduceResult TryBuildGlobalLoad (const compiler::GlobalAccessFeedback &global_access_feedback)
 
bool TryBuildFindNonDefaultConstructorOrConstruct (ValueNode *this_function, ValueNode *new_target, std::pair< interpreter::Register, interpreter::Register > result)
 
ValueNode * BuildSmiUntag (ValueNode *node)
 
ValueNode * BuildNumberOrOddballToFloat64 (ValueNode *node, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
ReduceResult BuildCheckSmi (ValueNode *object, bool elidable=true)
 
ReduceResult BuildCheckNumber (ValueNode *object)
 
ReduceResult BuildCheckHeapObject (ValueNode *object)
 
ReduceResult BuildCheckJSFunction (ValueNode *object)
 
ReduceResult BuildCheckJSReceiver (ValueNode *object)
 
ReduceResult BuildCheckJSReceiverOrNullOrUndefined (ValueNode *object)
 
ReduceResult BuildCheckSeqOneByteString (ValueNode *object)
 
ReduceResult BuildCheckString (ValueNode *object)
 
ReduceResult BuildCheckStringOrStringWrapper (ValueNode *object)
 
ReduceResult BuildCheckStringOrOddball (ValueNode *object)
 
ReduceResult BuildCheckSymbol (ValueNode *object)
 
ReduceResult BuildCheckMaps (ValueNode *object, base::Vector< const compiler::MapRef > maps, std::optional< ValueNode * > map={}, bool has_deprecated_map_without_migration_target=false, bool migration_done_outside=false)
 
ReduceResult BuildTransitionElementsKindOrCheckMap (ValueNode *heap_object, ValueNode *object_map, const ZoneVector< compiler::MapRef > &transition_sources, compiler::MapRef transition_target)
 
ReduceResult BuildCompareMaps (ValueNode *heap_object, ValueNode *object_map, base::Vector< const compiler::MapRef > maps, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)
 
ReduceResult BuildTransitionElementsKindAndCompareMaps (ValueNode *heap_object, ValueNode *object_map, const ZoneVector< compiler::MapRef > &transition_sources, compiler::MapRef transition_target, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)
 
ReduceResult BuildCheckInternalizedStringValueOrByReference (ValueNode *node, compiler::HeapObjectRef ref, DeoptimizeReason reason)
 
ReduceResult BuildCheckNumericalValueOrByReference (ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)
 
ReduceResult BuildCheckValueByReference (ValueNode *node, compiler::HeapObjectRef ref, DeoptimizeReason reason)
 
ReduceResult BuildCheckNumericalValue (ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)
 
ValueNode * BuildConvertHoleToUndefined (ValueNode *node)
 
ReduceResult BuildCheckNotHole (ValueNode *node)
 
template<bool flip = false>
ValueNode * BuildToBoolean (ValueNode *node)
 
ValueNode * BuildLogicalNot (ValueNode *value)
 
ValueNode * BuildTestUndetectable (ValueNode *value)
 
ReduceResult BuildToNumberOrToNumeric (Object::Conversion mode)
 
bool CanTrackObjectChanges (ValueNode *object, TrackObjectMode mode)
 
bool CanElideWriteBarrier (ValueNode *object, ValueNode *value)
 
void BuildInitializeStore (InlinedAllocation *alloc, ValueNode *value, int offset)
 
void TryBuildStoreTaggedFieldToAllocation (ValueNode *object, ValueNode *value, int offset)
 
template<typename Instruction = LoadTaggedField, typename... Args>
ValueNode * BuildLoadTaggedField (ValueNode *object, uint32_t offset, Args &&... args)
 
Node * BuildStoreTaggedField (ValueNode *object, ValueNode *value, int offset, StoreTaggedMode store_mode)
 
Node * BuildStoreTaggedFieldNoWriteBarrier (ValueNode *object, ValueNode *value, int offset, StoreTaggedMode store_mode)
 
void BuildStoreTrustedPointerField (ValueNode *object, ValueNode *value, int offset, IndirectPointerTag tag, StoreTaggedMode store_mode)
 
ValueNode * BuildLoadFixedArrayElement (ValueNode *elements, int index)
 
ValueNode * BuildLoadFixedArrayElement (ValueNode *elements, ValueNode *index)
 
void BuildStoreFixedArrayElement (ValueNode *elements, ValueNode *index, ValueNode *value)
 
ValueNode * BuildLoadFixedDoubleArrayElement (ValueNode *elements, int index)
 
ValueNode * BuildLoadFixedDoubleArrayElement (ValueNode *elements, ValueNode *index)
 
void BuildStoreFixedDoubleArrayElement (ValueNode *elements, ValueNode *index, ValueNode *value)
 
ValueNode * BuildLoadHoleyFixedDoubleArrayElement (ValueNode *elements, ValueNode *index, bool convert_hole)
 
ValueNode * GetInt32ElementIndex (interpreter::Register reg)
 
ValueNode * GetInt32ElementIndex (ValueNode *index_object)
 
ReduceResult GetUint32ElementIndex (interpreter::Register reg)
 
ReduceResult GetUint32ElementIndex (ValueNode *index_object)
 
bool CanTreatHoleAsUndefined (base::Vector< const compiler::MapRef > const &receiver_maps)
 
compiler::OptionalObjectRef TryFoldLoadDictPrototypeConstant (compiler::PropertyAccessInfo const &access_info)
 
compiler::OptionalJSObjectRef TryGetConstantDataFieldHolder (compiler::PropertyAccessInfo const &access_info, ValueNode *lookup_start_object)
 
compiler::OptionalObjectRef TryFoldLoadConstantDataField (compiler::JSObjectRef holder, compiler::PropertyAccessInfo const &access_info)
 
std::optional< Float64 > TryFoldLoadConstantDoubleField (compiler::JSObjectRef holder, compiler::PropertyAccessInfo const &access_info)
 
ValueNode * BuildLoadField (compiler::PropertyAccessInfo const &access_info, ValueNode *lookup_start_object, compiler::NameRef name)
 
MaybeReduceResult TryBuildStoreField (compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, compiler::AccessMode access_mode)
 
MaybeReduceResult TryBuildPropertyGetterCall (compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object)
 
MaybeReduceResult TryBuildPropertySetterCall (compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object, ValueNode *value)
 
MaybeReduceResult TryBuildGetKeyedPropertyWithEnumeratedKey (ValueNode *object, const compiler::FeedbackSource &feedback_source, const compiler::ProcessedFeedback &processed_feedback)
 
ReduceResult BuildGetKeyedProperty (ValueNode *object, const compiler::FeedbackSource &feedback_source, const compiler::ProcessedFeedback &processed_feedback)
 
ValueNode * BuildLoadFixedArrayLength (ValueNode *fixed_array)
 
ValueNode * BuildLoadJSArrayLength (ValueNode *js_array, NodeType length_type=NodeType::kSmi)
 
ValueNode * BuildLoadElements (ValueNode *object)
 
ValueNode * BuildLoadJSFunctionFeedbackCell (ValueNode *closure)
 
ValueNode * BuildLoadJSFunctionContext (ValueNode *closure)
 
MaybeReduceResult TryBuildCheckInt32Condition (ValueNode *lhs, ValueNode *rhs, AssertCondition condition, DeoptimizeReason reason, bool allow_unconditional_deopt=true)
 
MaybeReduceResult TryBuildPropertyLoad (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info)
 
MaybeReduceResult TryBuildPropertyStore (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info, compiler::AccessMode access_mode)
 
MaybeReduceResult TryBuildPropertyAccess (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info, compiler::AccessMode access_mode)
 
template<typename GenericAccessFunc >
MaybeReduceResult TryBuildNamedAccess (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NamedAccessFeedback const &feedback, compiler::FeedbackSource const &feedback_source, compiler::AccessMode access_mode, GenericAccessFunc &&build_generic_access)
 
template<typename GenericAccessFunc >
MaybeReduceResult TryBuildLoadNamedProperty (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::FeedbackSource &feedback_source, GenericAccessFunc &&build_generic_access)
 
MaybeReduceResult TryBuildLoadNamedProperty (ValueNode *receiver, compiler::NameRef name, compiler::FeedbackSource &feedback_source)
 
ReduceResult BuildLoadTypedArrayLength (ValueNode *object, ElementsKind elements_kind)
 
ValueNode * BuildLoadTypedArrayElement (ValueNode *object, ValueNode *index, ElementsKind elements_kind)
 
ValueNode * BuildLoadConstantTypedArrayElement (compiler::JSTypedArrayRef typed_array, ValueNode *index, ElementsKind elements_kind)
 
void BuildStoreTypedArrayElement (ValueNode *object, ValueNode *index, ElementsKind elements_kind)
 
void BuildStoreConstantTypedArrayElement (compiler::JSTypedArrayRef typed_array, ValueNode *index, ElementsKind elements_kind)
 
MaybeReduceResult TryBuildElementAccessOnString (ValueNode *object, ValueNode *index, const compiler::ElementAccessFeedback &access_info, compiler::KeyedAccessMode const &keyed_mode)
 
MaybeReduceResult TryBuildElementAccessOnTypedArray (ValueNode *object, ValueNode *index, const compiler::ElementAccessInfo &access_info, compiler::KeyedAccessMode const &keyed_mode)
 
MaybeReduceResult TryBuildElementLoadOnJSArrayOrJSObject (ValueNode *object, ValueNode *index, base::Vector< const compiler::MapRef > maps, ElementsKind kind, KeyedAccessLoadMode load_mode)
 
MaybeReduceResult TryBuildElementStoreOnJSArrayOrJSObject (ValueNode *object, ValueNode *index_object, ValueNode *value, base::Vector< const compiler::MapRef > maps, ElementsKind kind, const compiler::KeyedAccessMode &keyed_mode)
 
MaybeReduceResult TryBuildElementAccessOnJSArrayOrJSObject (ValueNode *object, ValueNode *index, const compiler::ElementAccessInfo &access_info, compiler::KeyedAccessMode const &keyed_mode)
 
template<typename GenericAccessFunc >
MaybeReduceResult TryBuildElementAccess (ValueNode *object, ValueNode *index, compiler::ElementAccessFeedback const &feedback, compiler::FeedbackSource const &feedback_source, GenericAccessFunc &&build_generic_access)
 
template<typename GenericAccessFunc >
MaybeReduceResult TryBuildPolymorphicElementAccess (ValueNode *object, ValueNode *index, const compiler::KeyedAccessMode &keyed_mode, const ZoneVector< compiler::ElementAccessInfo > &access_infos, GenericAccessFunc &&build_generic_access)
 
template<typename GenericAccessFunc >
MaybeReduceResult TryBuildPolymorphicPropertyAccess (ValueNode *receiver, ValueNode *lookup_start_object, compiler::NamedAccessFeedback const &feedback, compiler::AccessMode access_mode, const ZoneVector< compiler::PropertyAccessInfo > &access_infos, GenericAccessFunc &&build_generic_access)
 
std::optional< ContinuationOffsets > FindContinuationForPolymorphicPropertyLoad ()
 
void BuildContinuationForPolymorphicPropertyLoad (const ContinuationOffsets &offsets)
 
void RecordKnownProperty (ValueNode *lookup_start_object, KnownNodeAspects::LoadedPropertyMapKey key, ValueNode *value, bool is_const, compiler::AccessMode access_mode)
 
MaybeReduceResult TryReuseKnownPropertyLoad (ValueNode *lookup_start_object, compiler::NameRef name)
 
ValueNode * BuildLoadStringLength (ValueNode *string)
 
ReduceResult ConvertForStoring (ValueNode *node, ElementsKind kind)
 
InferHasInPrototypeChainResult InferHasInPrototypeChain (ValueNode *receiver, compiler::HeapObjectRef prototype)
 
MaybeReduceResult TryBuildFastHasInPrototypeChain (ValueNode *object, compiler::HeapObjectRef prototype)
 
ReduceResult BuildHasInPrototypeChain (ValueNode *object, compiler::HeapObjectRef prototype)
 
MaybeReduceResult TryBuildFastOrdinaryHasInstance (ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)
 
ReduceResult BuildOrdinaryHasInstance (ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)
 
MaybeReduceResult TryBuildFastInstanceOf (ValueNode *object, compiler::JSObjectRef callable_ref, ValueNode *callable_node)
 
MaybeReduceResult TryBuildFastInstanceOfWithFeedback (ValueNode *object, ValueNode *callable, compiler::FeedbackSource feedback_source)
 
VirtualObject * GetObjectFromAllocation (InlinedAllocation *allocation)
 
VirtualObject * GetModifiableObjectFromAllocation (InlinedAllocation *allocation)
 
VirtualObject * DeepCopyVirtualObject (VirtualObject *vobj)
 
VirtualObject * CreateVirtualObject (compiler::MapRef map, uint32_t slot_count_including_map)
 
VirtualObject * CreateHeapNumber (Float64 value)
 
VirtualObject * CreateDoubleFixedArray (uint32_t elements_length, compiler::FixedDoubleArrayRef elements)
 
VirtualObject * CreateJSObject (compiler::MapRef map)
 
VirtualObject * CreateConsString (ValueNode *map, ValueNode *length, ValueNode *first, ValueNode *second)
 
ReduceResult CreateJSArray (compiler::MapRef map, int instance_size, ValueNode *length)
 
VirtualObject * CreateJSArrayIterator (compiler::MapRef map, ValueNode *iterated_object, IterationKind kind)
 
VirtualObject * CreateJSConstructor (compiler::JSFunctionRef constructor)
 
VirtualObject * CreateFixedArray (compiler::MapRef map, int length)
 
VirtualObject * CreateContext (compiler::MapRef map, int length, compiler::ScopeInfoRef scope_info, ValueNode *previous_context, std::optional< ValueNode * > extension={})
 
VirtualObject * CreateArgumentsObject (compiler::MapRef map, ValueNode *length, ValueNode *elements, std::optional< ValueNode * > callee={})
 
VirtualObject * CreateMappedArgumentsElements (compiler::MapRef map, int mapped_count, ValueNode *context, ValueNode *unmapped_elements)
 
VirtualObject * CreateRegExpLiteralObject (compiler::MapRef map, compiler::RegExpBoilerplateDescriptionRef literal)
 
VirtualObject * CreateJSGeneratorObject (compiler::MapRef map, int instance_size, ValueNode *context, ValueNode *closure, ValueNode *receiver, ValueNode *register_file)
 
VirtualObject * CreateJSIteratorResult (compiler::MapRef map, ValueNode *value, ValueNode *done)
 
VirtualObject * CreateJSStringIterator (compiler::MapRef map, ValueNode *string)
 
InlinedAllocation * ExtendOrReallocateCurrentAllocationBlock (AllocationType allocation_type, VirtualObject *value)
 
void ClearCurrentAllocationBlock ()
 
void AddDeoptUse (ValueNode *node)
 
void AddDeoptUse (VirtualObject *alloc)
 
void AddNonEscapingUses (InlinedAllocation *allocation, int use_count)
 
std::optional< VirtualObject * > TryGetNonEscapingArgumentsObject (ValueNode *value)
 
MaybeReduceResult TryBuildFastCreateObjectOrArrayLiteral (const compiler::LiteralFeedback &feedback)
 
std::optional< VirtualObject * > TryReadBoilerplateForFastLiteral (compiler::JSObjectRef boilerplate, AllocationType allocation, int max_depth, int *max_properties)
 
InlinedAllocation * BuildInlinedAllocationForConsString (VirtualObject *object, AllocationType allocation)
 
InlinedAllocation * BuildInlinedAllocationForHeapNumber (VirtualObject *object, AllocationType allocation)
 
InlinedAllocation * BuildInlinedAllocationForDoubleFixedArray (VirtualObject *object, AllocationType allocation)
 
InlinedAllocation * BuildInlinedAllocation (VirtualObject *object, AllocationType allocation)
 
ValueNode * BuildInlinedArgumentsElements (int start_index, int length)
 
ValueNode * BuildInlinedUnmappedArgumentsElements (int mapped_count)
 
template<CreateArgumentsType type>
VirtualObject * BuildVirtualArgumentsObject ()
 
template<CreateArgumentsType type>
ValueNode * BuildAndAllocateArgumentsObject ()
 
bool CanAllocateSloppyArgumentElements ()
 
bool CanAllocateInlinedArgumentElements ()
 
MaybeReduceResult TryBuildInlinedAllocatedContext (compiler::MapRef map, compiler::ScopeInfoRef scope, int context_length)
 
template<Operation kOperation>
void BuildGenericUnaryOperationNode ()
 
template<Operation kOperation>
void BuildGenericBinaryOperationNode ()
 
template<Operation kOperation>
void BuildGenericBinarySmiOperationNode ()
 
template<Operation kOperation>
bool TryReduceCompareEqualAgainstConstant ()
 
template<Operation kOperation>
MaybeReduceResult TryFoldInt32UnaryOperation (ValueNode *value)
 
template<Operation kOperation>
MaybeReduceResult TryFoldInt32BinaryOperation (ValueNode *left, ValueNode *right)
 
template<Operation kOperation>
MaybeReduceResult TryFoldInt32BinaryOperation (ValueNode *left, int32_t cst_right)
 
template<Operation kOperation>
ReduceResult BuildInt32UnaryOperationNode ()
 
ReduceResult BuildTruncatingInt32BitwiseNotForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
ReduceResult BuildInt32BinaryOperationNode ()
 
template<Operation kOperation>
ReduceResult BuildInt32BinarySmiOperationNode ()
 
template<Operation kOperation>
ReduceResult BuildTruncatingInt32BinaryOperationNodeForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
ReduceResult BuildTruncatingInt32BinarySmiOperationNodeForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
MaybeReduceResult TryFoldFloat64UnaryOperationForToNumber (TaggedToFloat64ConversionType conversion_type, ValueNode *value)
 
template<Operation kOperation>
MaybeReduceResult TryFoldFloat64BinaryOperationForToNumber (TaggedToFloat64ConversionType conversion_type, ValueNode *left, ValueNode *right)
 
template<Operation kOperation>
MaybeReduceResult TryFoldFloat64BinaryOperationForToNumber (TaggedToFloat64ConversionType conversion_type, ValueNode *left, double cst_right)
 
template<Operation kOperation>
ReduceResult BuildFloat64UnaryOperationNodeForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
ReduceResult BuildFloat64BinaryOperationNodeForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
ReduceResult BuildFloat64BinarySmiOperationNodeForToNumber (NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
 
template<Operation kOperation>
ReduceResult VisitUnaryOperation ()
 
template<Operation kOperation>
ReduceResult VisitBinaryOperation ()
 
template<Operation kOperation>
ReduceResult VisitBinarySmiOperation ()
 
ValueNode * BuildUnwrapStringWrapper (ValueNode *input)
 
ReduceResult BuildStringConcat (ValueNode *left, ValueNode *right)
 
ValueNode * BuildNewConsStringMap (ValueNode *left, ValueNode *right)
 
size_t StringLengthStaticLowerBound (ValueNode *string, int max_depth=2)
 
MaybeReduceResult TryBuildNewConsString (ValueNode *left, ValueNode *right, AllocationType allocation_type=AllocationType::kYoung)
 
template<Operation kOperation>
ReduceResult VisitCompareOperation ()
 
template<typename Function >
MaybeReduceResult TryReduceTypeOf (ValueNode *value, const Function &GetResult)
 
MaybeReduceResult TryReduceTypeOf (ValueNode *value)
 
void BeginLoopEffects (int loop_header)
 
void EndLoopEffects (int loop_header)
 
void MergeIntoFrameState (BasicBlock *block, int target)
 
void MergeDeadIntoFrameState (int target)
 
void MergeDeadLoopIntoFrameState (int target)
 
void MergeIntoInlinedReturnFrameState (BasicBlock *block)
 
bool HasValidInitialMap (compiler::JSFunctionRef new_target, compiler::JSFunctionRef constructor)
 
ValueNode * BuildTaggedEqual (ValueNode *lhs, ValueNode *rhs)
 
ValueNode * BuildTaggedEqual (ValueNode *lhs, RootIndex rhs_index)
 
BranchBuilder CreateBranchBuilder (BranchType jump_type=BranchType::kBranchIfTrue)
 
BranchBuilder CreateBranchBuilder (MaglevSubGraphBuilder *subgraph, MaglevSubGraphBuilder::Label *jump_label, BranchType jump_type=BranchType::kBranchIfTrue)
 
BranchResult BuildBranchIfRootConstant (BranchBuilder &builder, ValueNode *node, RootIndex root_index)
 
BranchResult BuildBranchIfToBooleanTrue (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfInt32ToBooleanTrue (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfIntPtrToBooleanTrue (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfFloat64ToBooleanTrue (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfFloat64IsHole (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfReferenceEqual (BranchBuilder &builder, ValueNode *lhs, ValueNode *rhs)
 
BranchResult BuildBranchIfInt32Compare (BranchBuilder &builder, Operation op, ValueNode *lhs, ValueNode *rhs)
 
BranchResult BuildBranchIfUint32Compare (BranchBuilder &builder, Operation op, ValueNode *lhs, ValueNode *rhs)
 
BranchResult BuildBranchIfUndefinedOrNull (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfUndetectable (BranchBuilder &builder, ValueNode *value)
 
BranchResult BuildBranchIfJSReceiver (BranchBuilder &builder, ValueNode *value)
 
BranchResult BuildBranchIfTrue (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfNull (BranchBuilder &builder, ValueNode *node)
 
BranchResult BuildBranchIfUndefined (BranchBuilder &builder, ValueNode *node)
 
BasicBlock * BuildBranchIfReferenceEqual (ValueNode *lhs, ValueNode *rhs, BasicBlockRef *true_target, BasicBlockRef *false_target)
 
template<typename FCond , typename FTrue , typename FFalse >
ValueNode * Select (FCond cond, FTrue if_true, FFalse if_false)
 
template<typename FCond , typename FTrue , typename FFalse >
MaybeReduceResult SelectReduction (FCond cond, FTrue if_true, FFalse if_false)
 
void MarkBranchDeadAndJumpIfNeeded (bool is_jump_taken)
 
void CalculatePredecessorCounts ()
 
compiler::FeedbackVectorRef feedback () const
 
const FeedbackNexus FeedbackNexusForOperand (int slot_operand_index) const
 
const FeedbackNexus FeedbackNexusForSlot (FeedbackSlot slot) const
 
compiler::BytecodeArrayRef bytecode () const
 
const compiler::BytecodeAnalysis & bytecode_analysis () const
 
int parameter_count () const
 
int parameter_count_without_receiver () const
 
int register_count () const
 
KnownNodeAspects & known_node_aspects ()
 
float GetCurrentCallFrequency ()
 
int argument_count () const
 
int argument_count_without_receiver () const
 
bool IsInsideLoop () const
 
int inline_exit_offset () const
 
void DecrementDeadPredecessorAndAccountForPeeling (uint32_t offset)
 
void InitializePredecessorCount (uint32_t offset, int amount)
 
void UpdatePredecessorCount (uint32_t offset, int diff)
 
uint32_t predecessor_count (uint32_t offset)
 
bool in_peeled_iteration () const
 
bool in_optimistic_peeling_iteration () const
 
bool is_loop_effect_tracking_enabled ()
 
bool is_loop_effect_tracking ()
 
int bailout_for_entrypoint ()
 
bool CanSpeculateCall () const
 
bool CanSpeculateCall (std::initializer_list< SpeculationMode > supported_modes) const
 
void MarkNodeDead (Node *node)
 

Static Private Member Functions

template<typename NodeT >
static constexpr UseReprHintRecording ShouldRecordUseReprHint ()
 
static BranchType NegateBranchType (BranchType jump_type)
 
static size_t fast_hash_combine (size_t seed, size_t h)
 
template<typename T >
static size_t gvn_hash_value (const T &in)
 
static size_t gvn_hash_value (const compiler::MapRef &map)
 
static size_t gvn_hash_value (const interpreter::Register &reg)
 
static size_t gvn_hash_value (const Representation &rep)
 
static size_t gvn_hash_value (const ExternalReference &ref)
 
static size_t gvn_hash_value (const PolymorphicAccessInfo &access_info)
 
template<typename T >
static size_t gvn_hash_value (const v8::internal::ZoneCompactSet< T > &vector)
 
template<typename T >
static size_t gvn_hash_value (const v8::internal::ZoneVector< T > &vector)
 

Private Attributes

LocalIsolate *const local_isolate_
 
MaglevCompilationUnit *const compilation_unit_
 
MaglevCallerDetails * caller_details_
 
compiler::JSHeapBroker * broker_ = compilation_unit_->broker()
 
Graph *const graph_
 
compiler::BytecodeAnalysis bytecode_analysis_
 
interpreter::BytecodeArrayIterator iterator_
 
SourcePositionTableIterator source_position_iterator_
 
uint32_t * predecessor_count_
 
int peeled_iteration_count_ = 0
 
bool any_peeled_loop_ = false
 
bool allow_loop_peeling_
 
LoopEffects * loop_effects_ = nullptr
 
ZoneDeque< LoopEffects * > loop_effects_stack_
 
ZoneVector< int > decremented_predecessor_offsets_
 
BitVector loop_headers_to_peel_
 
bool in_prologue_ = true
 
BasicBlock * current_block_ = nullptr
 
std::optional< InterpretedDeoptFrame > entry_stack_check_frame_
 
std::optional< DeoptFrame > latest_checkpointed_frame_
 
SourcePosition current_source_position_
 
ForInState current_for_in_state = ForInState()
 
AllocationBlock * current_allocation_block_ = nullptr
 
BasicBlockRef * jump_targets_
 
MergePointInterpreterFrameState ** merge_states_
 
InterpreterFrameState current_interpreter_frame_
 
compiler::FeedbackSource current_speculation_feedback_
 
SpeculationMode current_speculation_mode_
 
ValueNode * inlined_new_target_ = nullptr
 
bool is_turbolev_ = false
 
int entrypoint_
 
int inlining_id_ = SourcePosition::kNotInlined
 
int next_handler_table_index_ = 0
 
DeoptFrameScope * current_deopt_scope_ = nullptr
 
LazyDeoptResultLocationScope * lazy_deopt_result_location_scope_ = nullptr
 
ZoneStack< HandlerTableEntry > catch_block_stack_
 
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores_
 

Static Private Attributes

static constexpr bool kLoopsMustBeEnteredThroughHeader = true
 

Detailed Description

Definition at line 202 of file maglev-graph-builder.h.

Member Typedef Documentation

◆ GetDeoptScopeCallback

◆ InitialCallback

◆ ProcessElementCallback

Definition at line 2132 of file maglev-graph-builder.h.

◆ TypeOfLiteralFlag

Member Enumeration Documentation

◆ BranchResult

Enumerator
kDefault 
kAlwaysTrue 
kAlwaysFalse 

Definition at line 2843 of file maglev-graph-builder.h.

2843  {
2844  kDefault,
2845  kAlwaysTrue,
2846  kAlwaysFalse,
2847  };

◆ BranchSpecializationMode

Enumerator
kDefault 
kAlwaysBoolean 

Definition at line 2842 of file maglev-graph-builder.h.

2842 { kDefault, kAlwaysBoolean };

◆ BranchType

Enumerator
kBranchIfTrue 
kBranchIfFalse 

Definition at line 2841 of file maglev-graph-builder.h.

2841 { kBranchIfTrue, kBranchIfFalse };

◆ ContextSlotMutability

◆ InferHasInPrototypeChainResult

◆ StringAtOOBMode

Enumerator
kElement 
kCharAt 

Definition at line 2152 of file maglev-graph-builder.h.

2152 { kElement, kCharAt };

◆ TrackObjectMode

Constructor & Destructor Documentation

◆ MaglevGraphBuilder()

v8::internal::maglev::MaglevGraphBuilder::MaglevGraphBuilder ( LocalIsolate * local_isolate,
MaglevCompilationUnit * compilation_unit,
Graph * graph,
MaglevCallerDetails * caller_details = nullptr 
)
explicit

Definition at line 951 of file maglev-graph-builder.cc.

958  graph_(graph),
959  bytecode_analysis_(bytecode().object(), zone(),
960  compilation_unit->osr_offset(), true),
961  iterator_(bytecode().object()),
962  source_position_iterator_(bytecode().SourcePositionTable(broker())),
963  allow_loop_peeling_(v8_flags.maglev_loop_peeling),
968  // Add an extra jump_target slot for the inline exit if needed.
969  jump_targets_(zone()->AllocateArray<BasicBlockRef>(
970  bytecode().length() + (is_inline() ? 1 : 0))),
971  // Overallocate merge_states_ by one to allow always looking up the
972  // next offset. This overallocated slot can also be used for the inline
973  // exit when needed.
974  merge_states_(zone()->AllocateArray<MergePointInterpreterFrameState*>(
975  bytecode().length() + 1)),
979  : compilation_unit_->zone()->New<KnownNodeAspects>(
982  : VirtualObjectList()),
986  : 0),
989  memset(merge_states_, 0,
990  (bytecode().length() + 1) * sizeof(InterpreterFrameState*));
991  // Default construct basic block refs.
992  // TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
993  for (int i = 0; i < bytecode().length(); ++i) {
994  new (&jump_targets_[i]) BasicBlockRef();
995  }
996 
997  if (is_inline()) {
1000  // The allocation/initialisation logic here relies on inline_exit_offset
1001  // being the offset one past the end of the bytecode.
1003  merge_states_[inline_exit_offset()] = nullptr;
1004  new (&jump_targets_[inline_exit_offset()]) BasicBlockRef();
1008  }
1011  }
1012 
1016  graph_->is_osr());
1017  if (compilation_unit_->is_osr()) {
1018  CHECK(!is_inline());
1019 
1020  // Make sure that we're at a valid OSR entrypoint.
1021  //
1022  // This is also a defense-in-depth check to make sure that we're not
1023  // compiling invalid bytecode if the OSR offset is wrong (e.g. because it
1024  // belongs to different bytecode).
1025  //
1026  // OSR'ing into the middle of a loop is currently not supported. There
1027  // should not be any issue with OSR'ing outside of loops, just we currently
1028  // dont do it...
1029  interpreter::BytecodeArrayIterator it(bytecode().object());
1030  it.AdvanceTo(compilation_unit_->osr_offset().ToInt());
1031  CHECK(it.CurrentBytecodeIsValidOSREntry());
1032  CHECK_EQ(entrypoint_, it.GetJumpTargetOffset());
1033 
1035 
1036  if (v8_flags.trace_maglev_graph_building) {
1037  std::cout << "- Non-standard entrypoint @" << entrypoint_
1038  << " by OSR from @" << compilation_unit_->osr_offset().ToInt()
1039  << std::endl;
1040  }
1041  }
1043 
1045 }
static constexpr BytecodeOffset None()
Definition: utils.h:679
constexpr int ToInt() const
Definition: utils.h:677
T * New(Args &&... args)
Definition: zone.h:108
SourcePositionTableIterator source_position_iterator_
compiler::JSHeapBroker * broker() const
interpreter::BytecodeArrayIterator iterator_
compiler::BytecodeArrayRef bytecode() const
MergePointInterpreterFrameState ** merge_states_
ZoneStack< HandlerTableEntry > catch_block_stack_
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores_
MaglevCompilationUnit * compilation_unit() const
MaglevCallerDetails * caller_details() const
MaglevCompilationUnit *const compilation_unit_
V8_EXPORT_PRIVATE FlagValues v8_flags
#define CHECK_IMPLIES(lhs, rhs)
#define CHECK(condition)
Definition: logging.h:124
#define DCHECK_NOT_NULL(val)
Definition: logging.h:491
#define CHECK_EQ(lhs, rhs)
#define DCHECK_EQ(v1, v2)
Definition: logging.h:484
#define DCHECK_GT(v1, v2)
Definition: logging.h:486
ZoneUnorderedMap< KnownNodeAspects::LoadedContextSlotsKey, Node * > unobserved_context_slot_stores

References v8::internal::interpreter::BytecodeArrayIterator::AdvanceTo(), bytecode(), CalculatePredecessorCounts(), caller_details(), CHECK, CHECK_EQ, CHECK_IMPLIES, compilation_unit(), compilation_unit_, v8::internal::interpreter::BytecodeArrayIterator::CurrentBytecodeIsValidOSREntry(), DCHECK_EQ, DCHECK_GT, DCHECK_NOT_NULL, entrypoint_, v8::internal::interpreter::BytecodeArrayIterator::GetJumpTargetOffset(), graph_, v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::MaglevCompilationUnit::info(), inline_exit_offset(), v8::internal::maglev::MaglevCompilationUnit::inlining_depth(), is_inline(), v8::internal::maglev::MaglevCompilationUnit::is_osr(), v8::internal::maglev::Graph::is_osr(), iterator_, jump_targets_, v8::internal::length, v8::internal::compiler::BytecodeArrayRef::length(), v8::internal::maglev::MaglevCallerDetails::loop_effects, loop_effects_, loop_effects_stack_, merge_states_, v8::internal::BytecodeOffset::None(), v8::internal::maglev::MaglevCompilationUnit::osr_offset(), v8::internal::BytecodeOffset::ToInt(), v8::internal::maglev::MaglevCompilationInfo::toplevel_osr_offset(), v8::internal::maglev::MaglevCallerDetails::unobserved_context_slot_stores, unobserved_context_slot_stores_, and v8::internal::v8_flags.

+ Here is the call graph for this function:

Member Function Documentation

◆ AddDeoptUse() [1/2]

void v8::internal::maglev::MaglevGraphBuilder::AddDeoptUse ( ValueNode * node)
inlineprivate

Definition at line 2687 of file maglev-graph-builder.h.

2687  {
2688  if (node == nullptr) return;
2689  DCHECK(!node->Is<VirtualObject>());
2690  if (InlinedAllocation* alloc = node->TryCast<InlinedAllocation>()) {
2691  VirtualObject* vobject =
2693  if (vobject) {
2694  CHECK_NOT_NULL(vobject);
2695  AddDeoptUse(vobject);
2696  // Add an escaping use for the allocation.
2697  AddNonEscapingUses(alloc, 1);
2698  } else {
2699  DCHECK(alloc->is_returned_value_from_inline_call());
2700  }
2701  alloc->add_use();
2702  } else {
2703  node->add_use();
2704  }
2705  }
void AddNonEscapingUses(InlinedAllocation *allocation, int use_count)
VirtualObject * FindAllocatedWith(const InlinedAllocation *allocation) const
Definition: maglev-ir.h:6197
DCHECK(IsNull(value)||IsNativeContext(value)||value==Smi::uninitialized_deserialization_value())
#define CHECK_NOT_NULL(val)

References v8::internal::maglev::ValueNode::add_use(), CHECK_NOT_NULL, DCHECK, v8::internal::maglev::NodeBase::Is(), and v8::internal::maglev::NodeBase::TryCast().

Referenced by AddInlinedArgumentsToDeoptFrame(), GetDeoptFrameForEntryStackCheck(), GetDeoptFrameForLazyDeoptHelper(), and GetLatestCheckpointedFrame().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AddDeoptUse() [2/2]

void v8::internal::maglev::MaglevGraphBuilder::AddDeoptUse ( VirtualObject * alloc)
private

Definition at line 13672 of file maglev-graph-builder.cc.

13672  {
13673  vobject->ForEachInput([&](ValueNode* value) {
13674  if (InlinedAllocation* nested_allocation =
13675  value->TryCast<InlinedAllocation>()) {
13676  VirtualObject* nested_object =
13677  current_interpreter_frame_.virtual_objects().FindAllocatedWith(
13678  nested_allocation);
13679  CHECK_NOT_NULL(nested_object);
13680  AddDeoptUse(nested_object);
13681  } else if (!IsConstantNode(value->opcode()) &&
13682  value->opcode() != Opcode::kArgumentsElements &&
13683  value->opcode() != Opcode::kArgumentsLength &&
13684  value->opcode() != Opcode::kRestLength) {
13685  AddDeoptUse(value);
13686  }
13687  });
13688 }
constexpr bool IsConstantNode(Opcode opcode)
Definition: maglev-ir.h:510
else if(instr->arch_opcode()==kRiscvCmpZero)
return value
Definition: map-inl.h:912

References v8::internal::maglev::VirtualObject::ForEachInput(), v8::internal::if(), v8::internal::maglev::IsConstantNode(), and v8::internal::value.

+ Here is the call graph for this function:

◆ AddInitializedNodeToGraph()

void v8::internal::maglev::MaglevGraphBuilder::AddInitializedNodeToGraph ( Node * node)
inlineprivate

Definition at line 978 of file maglev-graph-builder.h.

978  {
979  // VirtualObjects should never be add to the Maglev graph.
980  DCHECK(!node->Is<VirtualObject>());
981  node_buffer().push_back(node);
982  node->set_owner(current_block_);
983  if (has_graph_labeller())
985  BytecodeOffset(iterator_.current_offset()),
987  if (v8_flags.trace_maglev_graph_building) {
988  std::cout << " " << node << " "
989  << PrintNodeLabel(graph_labeller(), node) << ": "
990  << PrintNode(graph_labeller(), node) << std::endl;
991  }
992 #ifdef DEBUG
993  new_nodes_.insert(node);
994 #endif
995  }
void push_back(const T &value)
MaglevGraphLabeller * graph_labeller() const
void RegisterNode(const NodeBase *node, const MaglevCompilationUnit *unit, BytecodeOffset bytecode_offset, SourcePosition position)
void PrintNode(const Node *node, std::ostream &os, int depth, int indentation=0)
Definition: node.cc:353

References compilation_unit_, current_block_, v8::internal::interpreter::BytecodeArrayIterator::current_offset(), current_source_position_, v8::internal::DCHECK(), graph_labeller(), has_graph_labeller(), v8::internal::maglev::NodeBase::Is(), iterator_, node_buffer(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::ZoneVector< T >::push_back(), v8::internal::maglev::MaglevGraphLabeller::RegisterNode(), v8::internal::maglev::NodeBase::set_owner(), and v8::internal::v8_flags.

Referenced by AttachExtraInfoAndAddToGraph(), and Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AddInlinedArgumentsToDeoptFrame()

DeoptFrame * v8::internal::maglev::MaglevGraphBuilder::AddInlinedArgumentsToDeoptFrame ( DeoptFrame * deopt_frame,
const MaglevCompilationUnit * unit,
ValueNode * closure,
base::Vector< ValueNode * >  args 
)

Definition at line 1324 of file maglev-graph-builder.cc.

1326  {
1327  // Only create InlinedArgumentsDeoptFrame if we have a mismatch between
1328  // formal parameter and arguments count.
1329  if (static_cast<int>(args.size()) != unit->parameter_count()) {
1330  deopt_frame = zone()->New<InlinedArgumentsDeoptFrame>(
1331  *unit, BytecodeOffset(iterator_.current_offset()), closure, args,
1332  deopt_frame);
1333  AddDeoptUse(closure);
1334  for (ValueNode* arg : deopt_frame->as_inlined_arguments().arguments()) {
1335  AddDeoptUse(arg);
1336  }
1337  }
1338  return deopt_frame;
1339 }
V8_BASE_EXPORT int const char va_list args
Definition: strings.h:23

References AddDeoptUse(), v8::base::args, v8::internal::maglev::InlinedArgumentsDeoptFrame::arguments(), v8::internal::maglev::DeoptFrame::as_inlined_arguments(), v8::internal::interpreter::BytecodeArrayIterator::current_offset(), iterator_, v8::internal::Zone::New(), v8::internal::maglev::MaglevCompilationUnit::parameter_count(), and zone().

Referenced by v8::internal::maglev::MaglevInliner::BuildInlineFunction(), and GetDeoptFrameForEagerCall().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AddNewCallNode()

template<typename CallNode , typename... Args>
CallNode * v8::internal::maglev::MaglevGraphBuilder::AddNewCallNode ( const CallArguments & args,
Args &&...  extra_args 
)
private

Definition at line 10908 of file maglev-graph-builder.cc.

10909  {
10910  size_t input_count = args.count_with_receiver() + CallNode::kFixedInputCount;
10911  return AddNewNode<CallNode>(
10912  input_count,
10913  [&](CallNode* call) {
10914  int arg_index = 0;
10915  call->set_arg(arg_index++,
10916  GetTaggedValue(GetValueOrUndefined(args.receiver())));
10917  for (size_t i = 0; i < args.count(); ++i) {
10918  call->set_arg(arg_index++, GetTaggedValue(args[i]));
10919  }
10920  },
10921  std::forward<Args>(extra_args)...);
10922 }
ValueNode * GetTaggedValue(ValueNode *value, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
ValueNode * GetValueOrUndefined(ValueNode *maybe_value)

References v8::base::args, v8::internal::anonymous_namespace{json-stringifier.cc}::for(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, and v8::internal::compiler::turboshaft::detail::input_count().

+ Here is the call graph for this function:

◆ AddNewNode() [1/2]

template<typename NodeT , typename Function , typename... Args>
NodeT* v8::internal::maglev::MaglevGraphBuilder::AddNewNode ( size_t  input_count,
Function &&  post_create_input_initializer,
Args &&...  args 
)
inlineprivate

Definition at line 1001 of file maglev-graph-builder.h.

1002  {
1003  NodeT* node =
1004  NodeBase::New<NodeT>(zone(), input_count, std::forward<Args>(args)...);
1005  post_create_input_initializer(node);
1006  return AttachExtraInfoAndAddToGraph(node);
1007  }
NodeTMixin< Node, Derived > NodeT
Definition: maglev-ir.h:3043

References v8::base::args, AttachExtraInfoAndAddToGraph(), v8::internal::compiler::turboshaft::detail::input_count(), and zone().

Referenced by BuildFloat64BinaryOperationNodeForToNumber(), BuildFloat64BinarySmiOperationNodeForToNumber(), BuildGenericBinaryOperationNode(), BuildGenericBinarySmiOperationNode(), BuildGenericUnaryOperationNode(), BuildTruncatingInt32BinaryOperationNodeForToNumber(), and BuildTruncatingInt32BinarySmiOperationNodeForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AddNewNode() [2/2]

template<typename NodeT , typename... Args>
NodeT* v8::internal::maglev::MaglevGraphBuilder::AddNewNode ( std::initializer_list< ValueNode * >  inputs,
Args &&...  args 
)
inlineprivate

Definition at line 1109 of file maglev-graph-builder.h.

1109  {
1110  static_assert(IsFixedInputNode<NodeT>());
1111  if constexpr (Node::participate_in_cse(Node::opcode_of<NodeT>)) {
1112  if (v8_flags.maglev_cse) {
1113  return AddNewNodeOrGetEquivalent<NodeT>(inputs,
1114  std::forward<Args>(args)...);
1115  }
1116  }
1117  NodeT* node = NodeBase::New<NodeT>(zone(), inputs.size(),
1118  std::forward<Args>(args)...);
1119  SetNodeInputs(node, inputs);
1120  return AttachExtraInfoAndAddToGraph(node);
1121  }
void SetNodeInputs(NodeT *node, std::initializer_list< ValueNode * > inputs)
static constexpr bool participate_in_cse(Opcode op)
Definition: maglev-ir.h:2591

References v8::base::args, AttachExtraInfoAndAddToGraph(), v8::internal::maglev::Node::participate_in_cse(), SetNodeInputs(), v8::internal::v8_flags, and zone().

+ Here is the call graph for this function:

◆ AddNewNodeOrGetEquivalent()

template<typename NodeT , typename... Args>
NodeT* v8::internal::maglev::MaglevGraphBuilder::AddNewNodeOrGetEquivalent ( std::initializer_list< ValueNode * >  raw_inputs,
Args &&...  args 
)
inline private

Definition at line 1010 of file maglev-graph-builder.h.

1011  {
1012  DCHECK(v8_flags.maglev_cse);
1013  static constexpr Opcode op = Node::opcode_of<NodeT>;
1014  static_assert(Node::participate_in_cse(op));
1015  using options_result =
1016  std::invoke_result_t<decltype(&NodeT::options), const NodeT>;
1017  static_assert(std::is_assignable_v<options_result, std::tuple<Args...>>,
1018  "Instruction participating in CSE needs options() returning "
1019  "a tuple matching the constructor arguments");
1020  static_assert(IsFixedInputNode<NodeT>());
1021  static_assert(NodeT::kInputCount <= 3);
1022 
1023  std::array<ValueNode*, NodeT::kInputCount> inputs;
1024  // Nodes with zero input count don't have kInputTypes defined.
1025  if constexpr (NodeT::kInputCount > 0) {
1026  int i = 0;
1027  constexpr UseReprHintRecording hint = ShouldRecordUseReprHint<NodeT>();
1028  for (ValueNode* raw_input : raw_inputs) {
1029  // TODO(marja): Here we might already have the empty type for the
1030  // node. Generate a deopt and make callers handle it.
1031  inputs[i] = ConvertInputTo<hint>(raw_input, NodeT::kInputTypes[i]);
1032  i++;
1033  }
1034  if constexpr (IsCommutativeNode(Node::opcode_of<NodeT>)) {
1035  static_assert(NodeT::kInputCount == 2);
1036  if ((IsConstantNode(inputs[0]->opcode()) || inputs[0] > inputs[1]) &&
1037  !IsConstantNode(inputs[1]->opcode())) {
1038  std::swap(inputs[0], inputs[1]);
1039  }
1040  }
1041  }
1042 
1043  uint32_t value_number;
1044  {
1045  size_t tmp_value_number = base::hash_value(op);
1046  (
1047  [&] {
1048  tmp_value_number =
1049  fast_hash_combine(tmp_value_number, gvn_hash_value(args));
1050  }(),
1051  ...);
1052  for (const auto& inp : inputs) {
1053  tmp_value_number =
1054  fast_hash_combine(tmp_value_number, base::hash_value(inp));
1055  }
1056  value_number = static_cast<uint32_t>(tmp_value_number);
1057  }
1058 
1059  auto exists = known_node_aspects().available_expressions.find(value_number);
1060  if (exists != known_node_aspects().available_expressions.end()) {
1061  auto candidate = exists->second.node;
1062  const bool sanity_check =
1063  candidate->Is<NodeT>() &&
1064  static_cast<size_t>(candidate->input_count()) == inputs.size();
1065  DCHECK_IMPLIES(sanity_check,
1067  candidate->properties()) == candidate->properties());
1068  const bool epoch_check =
1069  !Node::needs_epoch_check(op) ||
1070  known_node_aspects().effect_epoch() <= exists->second.effect_epoch;
1071  if (sanity_check && epoch_check) {
1072  if (static_cast<NodeT*>(candidate)->options() ==
1073  std::tuple{std::forward<Args>(args)...}) {
1074  int i = 0;
1075  for (const auto& inp : inputs) {
1076  if (inp != candidate->input(i).node()) {
1077  break;
1078  }
1079  i++;
1080  }
1081  if (static_cast<size_t>(i) == inputs.size()) {
1082  return static_cast<NodeT*>(candidate);
1083  }
1084  }
1085  }
1086  if (!epoch_check) {
1087  known_node_aspects().available_expressions.erase(exists);
1088  }
1089  }
1090  NodeT* node = NodeBase::New<NodeT>(zone(), inputs.size(),
1091  std::forward<Args>(args)...);
1092  int i = 0;
1093  for (ValueNode* input : inputs) {
1094  DCHECK_NOT_NULL(input);
1095  node->set_input(i++, input);
1096  }
1097  DCHECK_EQ(node->options(), std::tuple{std::forward<Args>(args)...});
1098  uint32_t epoch = Node::needs_epoch_check(op)
1102  known_node_aspects().available_expressions[value_number] = {node, epoch};
1103  }
1104  return AttachExtraInfoAndAddToGraph(node);
1105  }
static size_t fast_hash_combine(size_t seed, size_t h)
static constexpr bool needs_epoch_check(Opcode op)
Definition: maglev-ir.h:2599
size_t hash_value(unsigned long v)
Definition: hashing.h:209
constexpr OpProperties StaticPropertiesForOpcode(Opcode opcode)
Definition: maglev-ir.h:11851
constexpr bool IsCommutativeNode(Opcode opcode)
Definition: maglev-ir.h:514
#define DCHECK_IMPLIES(v1, v2)
Definition: logging.h:492
ZoneMap< uint32_t, AvailableExpression > available_expressions

References v8::base::args, AttachExtraInfoAndAddToGraph(), v8::internal::maglev::KnownNodeAspects::available_expressions, v8::internal::DCHECK(), DCHECK_EQ, DCHECK_IMPLIES, DCHECK_NOT_NULL, v8::internal::maglev::KnownNodeAspects::effect_epoch(), fast_hash_combine(), gvn_hash_value(), v8::base::hash_value(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::IsCommutativeNode(), v8::internal::maglev::IsConstantNode(), v8::internal::maglev::KnownNodeAspects::kEffectEpochForPureInstructions, v8::internal::maglev::KnownNodeAspects::kEffectEpochOverflow, known_node_aspects(), v8::internal::maglev::Node::needs_epoch_check(), v8::internal::maglev::Node::participate_in_cse(), v8::internal::maglev::StaticPropertiesForOpcode(), v8::internal::v8_flags, and zone().

+ Here is the call graph for this function:

◆ AddNonEscapingUses()

void v8::internal::maglev::MaglevGraphBuilder::AddNonEscapingUses ( InlinedAllocation allocation,
int  use_count 
)
private

Definition at line 13666 of file maglev-graph-builder.cc.

13667  {
13668  if (!v8_flags.maglev_escape_analysis) return;
13669  allocation->AddNonEscapingUses(use_count);
13670 }

References v8::internal::maglev::InlinedAllocation::AddNonEscapingUses(), and v8::internal::v8_flags.

Referenced by TryBuildStoreTaggedFieldToAllocation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ argument_count()

int v8::internal::maglev::MaglevGraphBuilder::argument_count ( ) const
inline private

Definition at line 3139 of file maglev-graph-builder.h.

3139  {
3140  DCHECK(is_inline());
3141  return static_cast<int>(caller_details_->arguments.size());
3142  }

References DCHECK.

Referenced by GetInlinedArgument().

+ Here is the caller graph for this function:

◆ argument_count_without_receiver()

int v8::internal::maglev::MaglevGraphBuilder::argument_count_without_receiver ( ) const
inline private

Definition at line 3143 of file maglev-graph-builder.h.

3143 { return argument_count() - 1; }

◆ AttachDeoptCheckpoint()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::AttachDeoptCheckpoint ( NodeT node)
inline private

Definition at line 1156 of file maglev-graph-builder.h.

1156  {
1157  if constexpr (NodeT::kProperties.is_deopt_checkpoint()) {
1158  node->SetEagerDeoptInfo(zone(), GetLatestCheckpointedFrame());
1159  }
1160  }

References GetLatestCheckpointedFrame(), and zone().

Referenced by AttachExtraInfoAndAddToGraph().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AttachEagerDeoptInfo()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::AttachEagerDeoptInfo ( NodeT node)
inline private

Definition at line 1163 of file maglev-graph-builder.h.

1163  {
1164  if constexpr (NodeT::kProperties.can_eager_deopt()) {
1165  node->SetEagerDeoptInfo(zone(), GetLatestCheckpointedFrame(),
1167  }
1168  }
compiler::FeedbackSource current_speculation_feedback_

References current_speculation_feedback_, GetLatestCheckpointedFrame(), and zone().

Referenced by AttachExtraInfoAndAddToGraph().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AttachExceptionHandlerInfo()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::AttachExceptionHandlerInfo ( NodeT node)
inline private

Definition at line 1188 of file maglev-graph-builder.h.

1188  {
1189  if constexpr (NodeT::kProperties.can_throw()) {
1190  CatchBlockDetails catch_block = GetCurrentTryCatchBlock();
1191  if (catch_block.ref) {
1192  if (!catch_block.exception_handler_was_used) {
1193  // Attach an empty live exception handler to mark that there's a
1194  // matching catch but we'll lazy deopt if we ever throw.
1195  new (node->exception_handler_info())
1196  ExceptionHandlerInfo(ExceptionHandlerInfo::kLazyDeopt);
1197  DCHECK(node->exception_handler_info()->HasExceptionHandler());
1198  DCHECK(node->exception_handler_info()->ShouldLazyDeopt());
 1199  if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {
1201  // Ensure that we always have the handler of inline call
1202  // candidates.
1204  node->exception_handler_info());
1205  }
1206  }
1207  return;
1208  }
1209 
1211  if (catch_block.block_already_exists) {
1213  // If we are inlining a function non-eagerly and we are not inside a
1214  // try block, then the catch block already exists.
1215  new (node->exception_handler_info()) ExceptionHandlerInfo(
1216  catch_block.ref->block_ptr(), catch_block.deopt_frame_distance);
1217  } else {
1218  // If we are inside a try block for the current builder or if we are
1219  // inside an eager inlined call inside a try block, the catch basic
1220  // block doesn't exist yet, use the ref-list mechanism.
1221  new (node->exception_handler_info()) ExceptionHandlerInfo(
1222  catch_block.ref, catch_block.deopt_frame_distance);
1223  }
1224 
1225  DCHECK(node->exception_handler_info()->HasExceptionHandler());
1226  DCHECK(!node->exception_handler_info()->ShouldLazyDeopt());
1227 
1228  current_block_->AddExceptionHandler(node->exception_handler_info());
1229 
1230  if (IsInsideTryBlock()) {
1231  // Merge the current state into the handler state.
1232  auto state = GetCatchBlockFrameState();
1233  DCHECK_NOT_NULL(state);
1234  state->MergeThrow(this, compilation_unit_,
1237  }
1238  } else {
1239  // Patch no exception handler marker.
1240  // TODO(victorgomes): Avoid allocating exception handler data in this
1241  // case.
1242  new (node->exception_handler_info()) ExceptionHandlerInfo();
1243  DCHECK(!node->exception_handler_info()->HasExceptionHandler());
 1244  if constexpr (std::is_same_v<NodeT, CallKnownJSFunction>) {
1246  // Ensure that we always have the handler of inline call candidates.
1247  current_block_->AddExceptionHandler(node->exception_handler_info());
1248  }
1249  }
1250  }
1251  }
1252  }
void AddExceptionHandler(ExceptionHandlerInfo *handler)
MergePointInterpreterFrameState * GetCatchBlockFrameState()

References v8::internal::maglev::BasicBlock::AddExceptionHandler(), v8::internal::maglev::CatchBlockDetails::block_already_exists, v8::internal::maglev::BasicBlockRef::block_ptr(), compilation_unit_, current_block_, current_interpreter_frame_, v8::internal::DCHECK(), DCHECK_IMPLIES, DCHECK_NOT_NULL, v8::internal::maglev::CatchBlockDetails::deopt_frame_distance, v8::internal::maglev::CatchBlockDetails::exception_handler_was_used, GetCatchBlockFrameState(), GetCurrentTryCatchBlock(), is_inline(), is_non_eager_inlining_enabled(), IsInsideTryBlock(), v8::internal::maglev::ExceptionHandlerInfo::kLazyDeopt, v8::internal::maglev::InterpreterFrameState::known_node_aspects(), v8::internal::maglev::CatchBlockDetails::ref, and v8::internal::maglev::InterpreterFrameState::virtual_objects().

Referenced by AttachExtraInfoAndAddToGraph().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AttachExtraInfoAndAddToGraph()

template<typename NodeT >
NodeT* v8::internal::maglev::MaglevGraphBuilder::AttachExtraInfoAndAddToGraph ( NodeT node)
inline private

Definition at line 1141 of file maglev-graph-builder.h.

1141  {
1142  static_assert(NodeT::kProperties.is_deopt_checkpoint() +
1143  NodeT::kProperties.can_eager_deopt() +
1144  NodeT::kProperties.can_lazy_deopt() <=
1145  1);
1146  AttachDeoptCheckpoint(node);
1147  AttachEagerDeoptInfo(node);
1148  AttachLazyDeoptInfo(node);
1151  MarkPossibleSideEffect(node);
1152  return node;
1153  }

References AddInitializedNodeToGraph(), AttachDeoptCheckpoint(), AttachEagerDeoptInfo(), AttachExceptionHandlerInfo(), AttachLazyDeoptInfo(), and MarkPossibleSideEffect().

Referenced by AddNewNode(), and AddNewNodeOrGetEquivalent().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ AttachLazyDeoptInfo()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::AttachLazyDeoptInfo ( NodeT node)
inline private

Definition at line 1171 of file maglev-graph-builder.h.

1171  {
1172  if constexpr (NodeT::kProperties.can_lazy_deopt()) {
1173  interpreter::Register result_location;
1174  int result_size;
1178  } else {
1179  std::tie(result_location, result_size) = GetResultLocationAndSize();
1180  }
1181  new (node->lazy_deopt_info()) LazyDeoptInfo(
1182  zone(), GetDeoptFrameForLazyDeopt(result_location, result_size),
1183  result_location, result_size, current_speculation_feedback_);
1184  }
1185  }
DeoptFrame GetDeoptFrameForLazyDeopt(interpreter::Register result_location, int result_size)
LazyDeoptResultLocationScope * lazy_deopt_result_location_scope_
std::pair< interpreter::Register, int > GetResultLocationAndSize() const

References current_speculation_feedback_, GetDeoptFrameForLazyDeopt(), GetResultLocationAndSize(), lazy_deopt_result_location_scope_, v8::internal::maglev::MaglevGraphBuilder::LazyDeoptResultLocationScope::result_location(), v8::internal::maglev::MaglevGraphBuilder::LazyDeoptResultLocationScope::result_size(), and zone().

Referenced by AttachExtraInfoAndAddToGraph().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ bailout_for_entrypoint()

int v8::internal::maglev::MaglevGraphBuilder::bailout_for_entrypoint ( )
inline private

Definition at line 3287 of file maglev-graph-builder.h.

3287  {
3288  if (!graph_->is_osr()) return kFunctionEntryBytecodeOffset;
3290  }
constexpr int kFunctionEntryBytecodeOffset
Definition: globals.h:845

References v8::internal::maglev::Graph::is_osr(), v8::internal::kFunctionEntryBytecodeOffset, v8::internal::compiler::BytecodeAnalysis::osr_bailout_id(), and v8::internal::BytecodeOffset::ToInt().

Referenced by GetDeoptFrameForEntryStackCheck().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BeginLoopEffects()

void v8::internal::maglev::MaglevGraphBuilder::BeginLoopEffects ( int  loop_header)
private

Definition at line 14428 of file maglev-graph-builder.cc.

14428  {
14429  loop_effects_stack_.push_back(zone()->New<LoopEffects>(loop_header, zone()));
14431 }

Referenced by VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ broker()

compiler::JSHeapBroker* v8::internal::maglev::MaglevGraphBuilder::broker ( ) const
inline

Definition at line 366 of file maglev-graph-builder.h.

366 { return broker_; }

References broker_.

Referenced by Build(), BuildCheckMaps(), BuildCheckSmi(), BuildCompareMaps(), BuildLoadField(), BuildLoadFixedArrayElement(), BuildLoadJSArrayLength(), BuildLoadJSFunctionContext(), BuildLoadJSFunctionFeedbackCell(), BuildNewConsStringMap(), BuildRegisterFrameInitialization(), BuildStoreMap(), BuildTestUndetectable(), BuildTransitionElementsKindAndCompareMaps(), BuildTransitionElementsKindOrCheckMap(), CanTreatHoleAsUndefined(), CheckContextExtensions(), CheckType(), CheckTypes(), EnsureType(), GetOrCreateInfoFor(), GetType(), HasDisjointType(), HaveDisjointTypes(), LoadAndCacheContextSlot(), v8::internal::maglev::MergePointInterpreterFrameState::MergeLoopValue(), v8::internal::maglev::MergePointInterpreterFrameState::MergeValue(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObjectValue(), SetKnownValue(), StoreAndCacheContextSlot(), StringLengthStaticLowerBound(), TryBuildNamedAccess(), TryBuildPropertyCellLoad(), TryBuildPropertyCellStore(), TryBuildPropertyGetterCall(), TryBuildPropertyLoad(), TryBuildPropertyStore(), TryBuildScriptContextConstantLoad(), TryBuildStoreField(), TryFoldLoadConstantDataField(), TryFoldLoadConstantDoubleField(), TryFoldLoadDictPrototypeConstant(), TryGetConstant(), v8::internal::maglev::MergePointInterpreterFrameState::TryMergeLoop(), TryReduceCompareEqualAgainstConstant(), TrySpecializeLoadContextSlot(), TrySpecializeLoadContextSlotToFunctionContext(), TrySpecializeStoreContextSlot(), and VisitBinaryOperation().

+ Here is the caller graph for this function:

◆ Build()

void v8::internal::maglev::MaglevGraphBuilder::Build ( )
inline

Definition at line 229 of file maglev-graph-builder.h.

229  {
230  DCHECK(!is_inline());
231 
233  current_source_position_ = SourcePosition(
234  compilation_unit_->shared_function_info().StartPosition(),
235  inlining_id_);
236 
237  StartPrologue();
238  for (int i = 0; i < parameter_count(); i++) {
239  // TODO(v8:7700): Consider creating InitialValue nodes lazily.
240  InitialValue* v = AddNewNode<InitialValue>(
242  DCHECK_EQ(graph()->parameters().size(), static_cast<size_t>(i));
243  graph()->parameters().push_back(v);
244  SetArgument(i, v);
245  }
246 
248 
249  // Don't use the AddNewNode helper for the function entry stack check, so
250  // that we can set a custom deopt frame on it.
251  FunctionEntryStackCheck* function_entry_stack_check =
252  NodeBase::New<FunctionEntryStackCheck>(zone(), 0);
253  new (function_entry_stack_check->lazy_deopt_info()) LazyDeoptInfo(
255  interpreter::Register::invalid_value(), 0, compiler::FeedbackSource());
256  AddInitializedNodeToGraph(function_entry_stack_check);
257 
259  EndPrologue();
260  in_prologue_ = false;
261 
262  compiler::ScopeInfoRef scope_info =
264  if (scope_info.HasOuterScopeInfo()) {
265  scope_info = scope_info.OuterScopeInfo(broker());
266  CHECK(scope_info.HasContext());
267  graph()->record_scope_info(GetContext(), scope_info);
268  }
269  if (compilation_unit_->is_osr()) {
271  }
272 
273  BuildBody();
274  }
ScopeInfoRef OuterScopeInfo(JSHeapBroker *broker) const
Definition: heap-refs.cc:1746
ScopeInfoRef scope_info(JSHeapBroker *broker) const
Definition: heap-refs.cc:2474
static constexpr Register FromParameterIndex(int index)
static constexpr Register invalid_value()
void record_scope_info(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
Definition: maglev-graph.h:199
ZoneVector< InitialValue * > & parameters()
Definition: maglev-graph.h:142
compiler::SharedFunctionInfoRef shared_function_info() const
void BuildRegisterFrameInitialization(ValueNode *context=nullptr, ValueNode *closure=nullptr, ValueNode *new_target=nullptr)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enables Turboshaft s StaticAssert and CheckTurboshaftTypeOf operations Wasm code into JS functions via the JS to Wasm wrappers are still inlined in TurboFan For controlling whether to at see turbo inline js wasm calls enable Turboshaft s loop unrolling enable an additional Turboshaft phase that performs optimizations based on type information enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps trace Turboshaft s if else to switch reducer invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the preconfigured old space size(in Mbytes)") DEFINE_INT(random_gc_interval

References AddInitializedNodeToGraph(), broker(), BuildBody(), BuildMergeStates(), BuildRegisterFrameInitialization(), CHECK, compilation_unit_, current_source_position_, v8::internal::DCHECK(), DCHECK_EQ, EndPrologue(), v8::internal::interpreter::Register::FromParameterIndex(), GetContext(), GetDeoptFrameForEntryStackCheck(), graph(), v8::internal::compiler::ScopeInfoRef::HasContext(), v8::internal::compiler::ScopeInfoRef::HasOuterScopeInfo(), in_prologue_, inlining_id_, v8::internal::interpreter::Register::invalid_value(), is_inline(), v8::internal::maglev::MaglevCompilationUnit::is_osr(), v8::internal::SourcePosition::kNotInlined, OsrAnalyzePrequel(), v8::internal::compiler::ScopeInfoRef::OuterScopeInfo(), parameter_count(), v8::internal::maglev::Graph::parameters(), v8::internal::maglev::Graph::record_scope_info(), v8::internal::compiler::SharedFunctionInfoRef::scope_info(), SetArgument(), v8::internal::maglev::MaglevCompilationUnit::shared_function_info(), size(), StartPrologue(), and zone().

Referenced by v8::internal::maglev::MaglevCompiler::Compile().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildAbort()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildAbort ( AbortReason  reason)
inline private

Definition at line 1422 of file maglev-graph-builder.h.

1422  {
1423  // Create a block rather than calling finish, since we don't yet know the
1424  // next block's offset before the loop skipping the rest of the bytecodes.
1425  FinishBlock<Abort>({}, reason);
1426  return ReduceResult::DoneWithAbort();
1427  }

◆ BuildAndAllocateArgumentsObject()

template<CreateArgumentsType type>
ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildAndAllocateArgumentsObject
private

Definition at line 13968 of file maglev-graph-builder.cc.

13968  {
13969  auto arguments = BuildVirtualArgumentsObject<type>();
13970  ValueNode* allocation =
13972  return allocation;
13973 }
InlinedAllocation * BuildInlinedAllocation(VirtualObject *object, AllocationType allocation)

References v8::internal::kYoung.

◆ BuildAndAllocateJSArray()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildAndAllocateJSArray ( compiler::MapRef  map,
ValueNode length,
ValueNode elements,
const compiler::SlackTrackingPrediction slack_tracking_prediction,
AllocationType  allocation_type 
)
private

Definition at line 12105 of file maglev-graph-builder.cc.

12108  {
12109  VirtualObject* array;
12111  array,
12112  CreateJSArray(map, slack_tracking_prediction.instance_size(), length));
12113  array->set(JSArray::kElementsOffset, elements);
12114  for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
12115  i++) {
12116  array->set(map.GetInObjectPropertyOffset(i),
12117  GetRootConstant(RootIndex::kUndefinedValue));
12118  }
12119  array->ClearSlots(map.GetInObjectPropertyOffset(
12120  slack_tracking_prediction.inobject_property_count()),
12121  GetRootConstant(RootIndex::kOnePointerFillerMap));
12122  ValueNode* allocation = BuildInlinedAllocation(array, allocation_type);
12123  return allocation;
12124 }
RootConstant * GetRootConstant(RootIndex index)
ReduceResult CreateJSArray(compiler::MapRef map, int instance_size, ValueNode *length)
#define GET_VALUE_OR_ABORT(variable, result)

References v8::internal::maglev::VirtualObject::ClearSlots(), GET_VALUE_OR_ABORT, v8::internal::compiler::MapRef::GetInObjectPropertyOffset(), v8::internal::compiler::SlackTrackingPrediction::inobject_property_count(), v8::internal::compiler::SlackTrackingPrediction::instance_size(), v8::internal::length, and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ BuildAndAllocateJSArrayIterator()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildAndAllocateJSArrayIterator ( ValueNode array,
IterationKind  iteration_kind 
)
private

Definition at line 12126 of file maglev-graph-builder.cc.

12127  {
12128  compiler::MapRef map =
12129  broker()->target_native_context().initial_array_iterator_map(broker());
12130  VirtualObject* iterator = CreateJSArrayIterator(map, array, iteration_kind);
12131  ValueNode* allocation =
12133  return allocation;
12134 }
NativeContextRef target_native_context() const
VirtualObject * CreateJSArrayIterator(compiler::MapRef map, ValueNode *iterated_object, IterationKind kind)

References broker(), and v8::internal::kYoung.

+ Here is the call graph for this function:

◆ BuildAndAllocateKeyValueArray()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildAndAllocateKeyValueArray ( ValueNode key,
ValueNode value 
)
private

Definition at line 12090 of file maglev-graph-builder.cc.

12091  {
12092  VirtualObject* elements = CreateFixedArray(broker()->fixed_array_map(), 2);
12093  elements->set(FixedArray::OffsetOfElementAt(0), key);
12094  elements->set(FixedArray::OffsetOfElementAt(1), value);
12095  compiler::MapRef map =
12096  broker()->target_native_context().js_array_packed_elements_map(broker());
12097  VirtualObject* array;
12099  array, CreateJSArray(map, map.instance_size(), GetInt32Constant(2)));
12100  array->set(JSArray::kElementsOffset, elements);
12101  ValueNode* allocation = BuildInlinedAllocation(array, AllocationType::kYoung);
12102  return allocation;
12103 }
VirtualObject * CreateFixedArray(compiler::MapRef map, int length)
Int32Constant * GetInt32Constant(int32_t constant)

References broker(), GET_VALUE_OR_ABORT, v8::internal::compiler::MapRef::instance_size(), v8::internal::key, v8::internal::kYoung, v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), v8::internal::maglev::VirtualObject::set(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildBody()

void v8::internal::maglev::MaglevGraphBuilder::BuildBody ( )
inline

Definition at line 295 of file maglev-graph-builder.h.

295  {
296  while (!source_position_iterator_.done() &&
298  current_source_position_ = SourcePosition(
300  inlining_id_);
302  }
304  iterator_.Advance()) {
306  if (V8_UNLIKELY(
308  PeelLoop();
310  interpreter::Bytecode::kJumpLoop);
311  continue;
312  }
314  }
316  is_inline() && caller_details_->loop_effects ? 1 : 0);
317  }
bool Contains(int i) const
Definition: bit-vector.h:180
#define V8_UNLIKELY(condition)
Definition: v8config.h:660

References v8::internal::SourcePositionTableIterator::Advance(), v8::internal::interpreter::BytecodeArrayIterator::Advance(), caller_details_, v8::internal::SourcePositionTableIterator::code_offset(), v8::internal::BitVector::Contains(), v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), v8::internal::interpreter::BytecodeArrayIterator::current_offset(), current_source_position_, DCHECK_EQ, v8::internal::SourcePositionTableIterator::done(), v8::internal::interpreter::BytecodeArrayIterator::done(), entrypoint_, v8::internal::LocalIsolate::heap(), inlining_id_, is_inline(), iterator_, local_isolate_, v8::internal::maglev::MaglevCallerDetails::loop_effects, loop_effects_stack_, loop_headers_to_peel_, PeelLoop(), v8::internal::LocalHeap::Safepoint(), v8::internal::SourcePosition::ScriptOffset(), v8::internal::interpreter::BytecodeArrayIterator::SetOffset(), v8::internal::SourcePositionTableIterator::source_position(), source_position_iterator_, V8_UNLIKELY, and VisitSingleBytecode().

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildBranchIfFloat64IsHole()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfFloat64IsHole ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14931 of file maglev-graph-builder.cc.

14932  {
14933  // TODO(victorgomes): Optimize.
14934  return builder.Build<BranchIfFloat64IsHole>({node});
14935 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build().

Referenced by BuildBranchIfUndetectable().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildBranchIfFloat64ToBooleanTrue()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfFloat64ToBooleanTrue ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14925 of file maglev-graph-builder.cc.

14926  {
14927  // TODO(victorgomes): Optimize.
14928  return builder.Build<BranchIfFloat64ToBooleanTrue>({node});
14929 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build().

+ Here is the call graph for this function:

◆ BuildBranchIfInt32Compare()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfInt32Compare ( BranchBuilder builder,
Operation  op,
ValueNode lhs,
ValueNode rhs 
)
private

Definition at line 14997 of file maglev-graph-builder.cc.

14998  {
14999  auto lhs_const = TryGetInt32Constant(lhs);
15000  if (lhs_const) {
15001  auto rhs_const = TryGetInt32Constant(rhs);
15002  if (rhs_const) {
15003  return builder.FromBool(
15004  CompareInt32(lhs_const.value(), rhs_const.value(), op));
15005  }
15006  }
15007  return builder.Build<BranchIfInt32Compare>({lhs, rhs}, op);
15008 }
std::optional< int32_t > TryGetInt32Constant(ValueNode *value)
bool CompareInt32(int32_t lhs, int32_t rhs, Operation operation)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::CompareInt32(), and v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool().

Referenced by TryBuildNewConsString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildBranchIfInt32ToBooleanTrue()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfInt32ToBooleanTrue ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14911 of file maglev-graph-builder.cc.

14912  {
14913  // TODO(victorgomes): Optimize.
14914  return builder.Build<BranchIfInt32ToBooleanTrue>({node});
14915 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build().

+ Here is the call graph for this function:

◆ BuildBranchIfIntPtrToBooleanTrue()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfIntPtrToBooleanTrue ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14918 of file maglev-graph-builder.cc.

14919  {
14920  // TODO(victorgomes): Optimize.
14921  return builder.Build<BranchIfIntPtrToBooleanTrue>({node});
14922 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build().

+ Here is the call graph for this function:

◆ BuildBranchIfJSReceiver()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfJSReceiver ( BranchBuilder builder,
ValueNode value 
)
private

Definition at line 14983 of file maglev-graph-builder.cc.

14984  {
14985  if (!value->is_tagged() && value->properties().value_representation() !=
14987  return builder.AlwaysFalse();
14988  }
14989  if (CheckType(value, NodeType::kJSReceiver)) {
14990  return builder.AlwaysTrue();
14991  } else if (HasDisjointType(value, NodeType::kJSReceiver)) {
14992  return builder.AlwaysFalse();
14993  }
14994  return builder.Build<BranchIfJSReceiver>({value});
14995 }
bool HasDisjointType(ValueNode *lhs, NodeType rhs_type)
bool CheckType(ValueNode *node, NodeType type, NodeType *old=nullptr)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysFalse(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysTrue(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildBranchIfNull()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfNull ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14789 of file maglev-graph-builder.cc.

14790  {
14791  return BuildBranchIfRootConstant(builder, node, RootIndex::kNullValue);
14792 }
BranchResult BuildBranchIfRootConstant(BranchBuilder &builder, ValueNode *node, RootIndex root_index)

◆ BuildBranchIfReferenceEqual() [1/2]

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfReferenceEqual ( BranchBuilder builder,
ValueNode lhs,
ValueNode rhs 
)
private

Definition at line 14630 of file maglev-graph-builder.cc.

14632  {
14633  if (RootConstant* root_constant = rhs->TryCast<RootConstant>()) {
14634  return builder.Build<BranchIfRootConstant>({lhs}, root_constant->index());
14635  }
14636  if (RootConstant* root_constant = lhs->TryCast<RootConstant>()) {
14637  return builder.Build<BranchIfRootConstant>({rhs}, root_constant->index());
14638  }
14639  if (InlinedAllocation* alloc_lhs = lhs->TryCast<InlinedAllocation>()) {
14640  if (InlinedAllocation* alloc_rhs = rhs->TryCast<InlinedAllocation>()) {
14641  return builder.FromBool(alloc_lhs == alloc_rhs);
14642  }
14643  }
14644 
14645  return builder.Build<BranchIfReferenceEqual>({lhs, rhs});
14646 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool(), and v8::internal::maglev::NodeBase::TryCast().

+ Here is the call graph for this function:

◆ BuildBranchIfReferenceEqual() [2/2]

BasicBlock* v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfReferenceEqual ( ValueNode lhs,
ValueNode rhs,
BasicBlockRef true_target,
BasicBlockRef false_target 
)
private

◆ BuildBranchIfRootConstant()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfRootConstant ( BranchBuilder builder,
ValueNode node,
RootIndex  root_index 
)
private

Definition at line 14675 of file maglev-graph-builder.cc.

14676  {
14677  // We assume that Maglev never emits a comparison to a root number.
14678  DCHECK(!IsNumberRootConstant(root_index));
14679 
14680  // If the node we're checking is in the accumulator, swap it in the branch
14681  // with the checked value. Cache whether we want to swap, since after we've
14682  // swapped the accumulator isn't the original node anymore.
14683  BranchBuilder::PatchAccumulatorInBranchScope scope(builder, node, root_index);
14684 
14685  if (node->properties().value_representation() ==
14687  if (root_index == RootIndex::kUndefinedValue) {
14688 #ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
14689  return builder.Build<BranchIfFloat64IsUndefinedOrHole>({node});
14690 #else
14691  return builder.Build<BranchIfFloat64IsHole>({node});
14692 #endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
14693  }
14694  return builder.AlwaysFalse();
14695  }
14696 
14697  if (CheckType(node, NodeType::kNumber)) {
14698  return builder.AlwaysFalse();
14699  }
14700  CHECK(node->is_tagged());
14701 
14702  if (root_index != RootIndex::kTrueValue &&
14703  root_index != RootIndex::kFalseValue &&
14704  CheckType(node, NodeType::kBoolean)) {
14705  return builder.AlwaysFalse();
14706  }
14707 
14708  while (LogicalNot* logical_not = node->TryCast<LogicalNot>()) {
14709  // Bypassing logical not(s) on the input and swapping true/false
14710  // destinations.
14711  node = logical_not->value().node();
14712  builder.SwapTargets();
14713  }
14714 
14715  if (RootConstant* constant = node->TryCast<RootConstant>()) {
14716  return builder.FromBool(constant->index() == root_index);
14717  }
14718 
14719  if (root_index == RootIndex::kUndefinedValue) {
14720  if (Constant* constant = node->TryCast<Constant>()) {
14721  return builder.FromBool(constant->object().IsUndefined());
14722  }
14723  }
14724 
14725  if (root_index != RootIndex::kTrueValue &&
14726  root_index != RootIndex::kFalseValue) {
14727  return builder.Build<BranchIfRootConstant>({node}, root_index);
14728  }
14729  if (root_index == RootIndex::kFalseValue) {
14730  builder.SwapTargets();
14731  }
14732  switch (node->opcode()) {
14733  case Opcode::kTaggedEqual:
14735  builder, node->Cast<TaggedEqual>()->lhs().node(),
14736  node->Cast<TaggedEqual>()->rhs().node());
14737  case Opcode::kTaggedNotEqual:
14738  // Swapped true and false targets.
14739  builder.SwapTargets();
14741  builder, node->Cast<TaggedNotEqual>()->lhs().node(),
14742  node->Cast<TaggedNotEqual>()->rhs().node());
14743  case Opcode::kInt32Compare:
14744  return builder.Build<BranchIfInt32Compare>(
14745  {node->Cast<Int32Compare>()->left_input().node(),
14746  node->Cast<Int32Compare>()->right_input().node()},
14747  node->Cast<Int32Compare>()->operation());
14748  case Opcode::kFloat64Compare:
14749  return builder.Build<BranchIfFloat64Compare>(
14750  {node->Cast<Float64Compare>()->left_input().node(),
14751  node->Cast<Float64Compare>()->right_input().node()},
14752  node->Cast<Float64Compare>()->operation());
14753  case Opcode::kInt32ToBoolean:
14754  if (node->Cast<Int32ToBoolean>()->flip()) {
14755  builder.SwapTargets();
14756  }
14757  return builder.Build<BranchIfInt32ToBooleanTrue>(
14758  {node->Cast<Int32ToBoolean>()->value().node()});
14759  case Opcode::kIntPtrToBoolean:
14760  if (node->Cast<IntPtrToBoolean>()->flip()) {
14761  builder.SwapTargets();
14762  }
14763  return builder.Build<BranchIfIntPtrToBooleanTrue>(
14764  {node->Cast<IntPtrToBoolean>()->value().node()});
14765  case Opcode::kFloat64ToBoolean:
14766  if (node->Cast<Float64ToBoolean>()->flip()) {
14767  builder.SwapTargets();
14768  }
14769  return builder.Build<BranchIfFloat64ToBooleanTrue>(
14770  {node->Cast<Float64ToBoolean>()->value().node()});
14771  case Opcode::kTestUndetectable:
14772  return builder.Build<BranchIfUndetectable>(
14773  {node->Cast<TestUndetectable>()->value().node()},
14774  node->Cast<TestUndetectable>()->check_type());
14775  case Opcode::kHoleyFloat64IsHole:
14776  return builder.Build<BranchIfFloat64IsHole>(
14777  {node->Cast<HoleyFloat64IsHole>()->input().node()});
14778  default:
14779  return builder.Build<BranchIfRootConstant>({node}, RootIndex::kTrueValue);
14780  }
14781 }
BranchResult BuildBranchIfReferenceEqual(BranchBuilder &builder, ValueNode *lhs, ValueNode *rhs)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysFalse(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::maglev::NodeBase::Cast(), CHECK, v8::internal::DCHECK(), v8::internal::maglev::Int32ToBoolean::flip(), v8::internal::maglev::IntPtrToBoolean::flip(), v8::internal::maglev::Float64ToBoolean::flip(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool(), v8::internal::maglev::ValueNode::is_tagged(), v8::internal::maglev::TaggedEqual::lhs(), v8::internal::maglev::TaggedNotEqual::lhs(), v8::internal::maglev::Input::node(), v8::internal::maglev::NodeBase::opcode(), v8::internal::maglev::NodeBase::properties(), v8::internal::maglev::TaggedEqual::rhs(), v8::internal::maglev::TaggedNotEqual::rhs(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::SwapTargets(), v8::internal::maglev::NodeBase::TryCast(), v8::internal::value, and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ BuildBranchIfToBooleanTrue()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfToBooleanTrue ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14819 of file maglev-graph-builder.cc.

14820  {
14821  // If this is a known boolean, use the non-ToBoolean version.
14822  if (CheckType(node, NodeType::kBoolean)) {
14823  return BuildBranchIfTrue(builder, node);
14824  }
14825 
14826  // There shouldn't be any LogicalNots here, for swapping true/false, since
14827  // these are known to be boolean and should have gone through the
14828  // non-ToBoolean path.
14829  DCHECK(!node->Is<LogicalNot>());
14830 
14831  bool known_to_boolean_value = false;
14832  bool direction_is_true = true;
14833  if (IsConstantNode(node->opcode())) {
14834  known_to_boolean_value = true;
14835  direction_is_true = FromConstantToBool(local_isolate(), node);
14836  } else {
14837  // TODO(victorgomes): Unify this with TestUndetectable?
14838  // JSReceivers are true iff they are not marked as undetectable. Check if
14839  // all maps have the same detectability, and if yes, the boolean value is
14840  // known.
14841  NodeInfo* node_info = known_node_aspects().TryGetInfoFor(node);
14842  if (node_info && NodeTypeIs(node_info->type(), NodeType::kJSReceiver) &&
14843  node_info->possible_maps_are_known()) {
14844  bool all_detectable = true;
14845  bool all_undetectable = true;
14846  for (compiler::MapRef map : node_info->possible_maps()) {
14847  bool is_undetectable = map.is_undetectable();
14848  all_detectable &= !is_undetectable;
14849  all_undetectable &= is_undetectable;
14850  }
14851  if (all_detectable || all_undetectable) {
14852  known_to_boolean_value = true;
14853  direction_is_true = all_detectable;
14854  }
14855  }
14856  }
14857  if (known_to_boolean_value) {
14858  return builder.FromBool(direction_is_true);
14859  }
14860 
14861  switch (node->value_representation()) {
14862  // The ToBoolean of both the_hole and NaN is false, so we can use the
14863  // same operation for HoleyFloat64 and Float64.
14866  return BuildBranchIfFloat64ToBooleanTrue(builder, node);
14867 
14869  // Uint32 has the same logic as Int32 when converting ToBoolean, namely
14870  // comparison against zero, so we can cast it and ignore the signedness.
14871  node = AddNewNode<TruncateUint32ToInt32>({node});
14872  [[fallthrough]];
14874  return BuildBranchIfInt32ToBooleanTrue(builder, node);
14875 
14877  return BuildBranchIfIntPtrToBooleanTrue(builder, node);
14878 
14880  break;
14881  }
14882 
14883  NodeInfo* node_info = known_node_aspects().TryGetInfoFor(node);
14884  if (node_info) {
14885  if (ValueNode* as_int32 = node_info->alternative().int32()) {
14886  return BuildBranchIfInt32ToBooleanTrue(builder, as_int32);
14887  }
14888  if (ValueNode* as_float64 = node_info->alternative().float64()) {
14889  return BuildBranchIfFloat64ToBooleanTrue(builder, as_float64);
14890  }
14891  }
14892 
14893  NodeType old_type;
14894  if (CheckType(node, NodeType::kBoolean, &old_type)) {
14895  return builder.Build<BranchIfRootConstant>({node}, RootIndex::kTrueValue);
14896  }
14897  if (CheckType(node, NodeType::kSmi)) {
14898  builder.SwapTargets();
14899  return builder.Build<BranchIfReferenceEqual>({node, GetSmiConstant(0)});
14900  }
14901  if (CheckType(node, NodeType::kString)) {
14902  builder.SwapTargets();
14903  return builder.Build<BranchIfRootConstant>({node},
14904  RootIndex::kempty_string);
14905  }
14906  // TODO(verwaest): Number or oddball.
14907  return builder.Build<BranchIfToBooleanTrue>({node}, GetCheckType(old_type));
14908 }
BranchResult BuildBranchIfIntPtrToBooleanTrue(BranchBuilder &builder, ValueNode *node)
BranchResult BuildBranchIfTrue(BranchBuilder &builder, ValueNode *node)
BranchResult BuildBranchIfFloat64ToBooleanTrue(BranchBuilder &builder, ValueNode *node)
SmiConstant * GetSmiConstant(int constant) const
BranchResult BuildBranchIfInt32ToBooleanTrue(BranchBuilder &builder, ValueNode *node)
constexpr bool NodeTypeIs(NodeType type, NodeType to_check)
Definition: maglev-ir.h:743
bool FromConstantToBool(LocalIsolate *local_isolate, ValueNode *node)
Definition: maglev-ir.cc:365
Map::Bits1::HasPrototypeSlotBit Map::Bits1::HasNamedInterceptorBit is_undetectable
Definition: map-inl.h:113
const NodeInfo * TryGetInfoFor(ValueNode *node) const

References v8::internal::maglev::NodeInfo::alternative(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::DCHECK(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool(), v8::internal::maglev::FromConstantToBool(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), v8::internal::maglev::NodeBase::Is(), v8::internal::is_undetectable, v8::internal::maglev::IsConstantNode(), v8::internal::anonymous_namespace{ic.cc}::kIntPtr, v8::internal::compiler::kSmi, v8::internal::maglev::NodeTypeIs(), v8::internal::maglev::NodeBase::opcode(), v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::SwapTargets(), v8::internal::maglev::NodeInfo::type(), and v8::internal::maglev::ValueNode::value_representation().

+ Here is the call graph for this function:

◆ BuildBranchIfTrue()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfTrue ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14783 of file maglev-graph-builder.cc.

14784  {
14785  builder.SetBranchSpecializationMode(BranchSpecializationMode::kAlwaysBoolean);
14786  return BuildBranchIfRootConstant(builder, node, RootIndex::kTrueValue);
14787 }

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::SetBranchSpecializationMode().

+ Here is the call graph for this function:

◆ BuildBranchIfUint32Compare()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfUint32Compare ( BranchBuilder builder,
Operation  op,
ValueNode lhs,
ValueNode rhs 
)
private

Definition at line 15010 of file maglev-graph-builder.cc.

15011  {
15012  auto lhs_const = TryGetUint32Constant(lhs);
15013  if (lhs_const) {
15014  auto rhs_const = TryGetUint32Constant(rhs);
15015  if (rhs_const) {
15016  return builder.FromBool(
15017  CompareUint32(lhs_const.value(), rhs_const.value(), op));
15018  }
15019  }
15020  return builder.Build<BranchIfUint32Compare>({lhs, rhs}, op);
15021 }
std::optional< uint32_t > TryGetUint32Constant(ValueNode *value)
bool CompareUint32(uint32_t lhs, uint32_t rhs, Operation operation)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::CompareUint32(), and v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool().

+ Here is the call graph for this function:

◆ BuildBranchIfUndefined()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfUndefined ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14794 of file maglev-graph-builder.cc.

14795  {
14796  return BuildBranchIfRootConstant(builder, node, RootIndex::kUndefinedValue);
14797 }

◆ BuildBranchIfUndefinedOrNull()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfUndefinedOrNull ( BranchBuilder builder,
ValueNode node 
)
private

Definition at line 14800 of file maglev-graph-builder.cc.

14801  {
14802  compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
14803  if (maybe_constant.has_value()) {
14804  return builder.FromBool(maybe_constant->IsNullOrUndefined());
14805  }
14806  if (!node->is_tagged()) {
14807  if (node->properties().value_representation() ==
14809  return BuildBranchIfFloat64IsHole(builder, node);
14810  }
14811  return builder.AlwaysFalse();
14812  }
14813  if (HasDisjointType(node, NodeType::kOddball)) {
14814  return builder.AlwaysFalse();
14815  }
14816  return builder.Build<BranchIfUndefinedOrNull>({node});
14817 }
compiler::OptionalHeapObjectRef TryGetConstant(ValueNode *node, ValueNode **constant_node=nullptr)
BranchResult BuildBranchIfFloat64IsHole(BranchBuilder &builder, ValueNode *node)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysFalse(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::FromBool(), v8::internal::maglev::ValueNode::is_tagged(), v8::internal::maglev::NodeBase::properties(), v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ BuildBranchIfUndetectable()

MaglevGraphBuilder::BranchResult v8::internal::maglev::MaglevGraphBuilder::BuildBranchIfUndetectable ( BranchBuilder builder,
ValueNode value 
)
private

Definition at line 3990 of file maglev-graph-builder.cc.

3991  {
3992  ValueNode* result = BuildTestUndetectable(value);
3993  switch (result->opcode()) {
3994  case Opcode::kRootConstant:
3995  switch (result->Cast<RootConstant>()->index()) {
3996  case RootIndex::kTrueValue:
3997  case RootIndex::kUndefinedValue:
3998  case RootIndex::kNullValue:
3999  return builder.AlwaysTrue();
4000  default:
4001  return builder.AlwaysFalse();
4002  }
4003  case Opcode::kHoleyFloat64IsHole:
4005  builder, result->Cast<HoleyFloat64IsHole>()->input().node());
4006  case Opcode::kTestUndetectable:
4007  return builder.Build<BranchIfUndetectable>(
4008  {result->Cast<TestUndetectable>()->value().node()},
4009  result->Cast<TestUndetectable>()->check_type());
4010  default:
4011  UNREACHABLE();
4012  }
4013 }
ValueNode * BuildTestUndetectable(ValueNode *value)

References v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysFalse(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::AlwaysTrue(), v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build(), BuildBranchIfFloat64IsHole(), BuildTestUndetectable(), v8::internal::maglev::RootConstant::index(), v8::internal::maglev::HoleyFloat64IsHole::input(), v8::internal::maglev::Input::node(), v8::base::internal::result, v8::internal::UNREACHABLE(), and v8::internal::value.

Referenced by TryReduceTypeOf().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCallBuiltin() [1/2]

template<Builtin kBuiltin>
CallBuiltin* v8::internal::maglev::MaglevGraphBuilder::BuildCallBuiltin ( std::initializer_list< ValueNode * >  inputs)
inlineprivate

Definition at line 1326 of file maglev-graph-builder.h.

1326  {
1327  using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
1328  if constexpr (Descriptor::HasContextParameter()) {
1329  return AddNewNode<CallBuiltin>(
1330  inputs.size() + 1,
1331  [&](CallBuiltin* call_builtin) {
1332  int arg_index = 0;
1333  for (auto* input : inputs) {
1334  call_builtin->set_arg(arg_index++, input);
1335  }
1336  },
1337  kBuiltin, GetContext());
1338  } else {
1339  return AddNewNode<CallBuiltin>(
1340  inputs.size(),
1341  [&](CallBuiltin* call_builtin) {
1342  int arg_index = 0;
1343  for (auto* input : inputs) {
1344  call_builtin->set_arg(arg_index++, input);
1345  }
1346  },
1347  kBuiltin);
1348  }
1349  }
unsigned char * type
Definition: trace-event.h:457

References GetContext().

+ Here is the call graph for this function:

◆ BuildCallBuiltin() [2/2]

template<Builtin kBuiltin>
CallBuiltin* v8::internal::maglev::MaglevGraphBuilder::BuildCallBuiltin ( std::initializer_list< ValueNode * >  inputs,
compiler::FeedbackSource const &  feedback,
CallBuiltin::FeedbackSlotType  slot_type = CallBuiltin::kTaggedIndex 
)
inlineprivate

Definition at line 1352 of file maglev-graph-builder.h.

1355  {
1356  CallBuiltin* call_builtin = BuildCallBuiltin<kBuiltin>(inputs);
1357  call_builtin->set_feedback(feedback, slot_type);
1358 #ifdef DEBUG
1359  // Check that the last parameters are kSlot and kVector.
1360  using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
1361  int slot_index = call_builtin->InputCountWithoutContext();
1362  int vector_index = slot_index + 1;
1363  DCHECK_EQ(slot_index, Descriptor::kSlot);
1364  // TODO(victorgomes): Rename all kFeedbackVector parameters in the builtins
1365  // to kVector.
1366  DCHECK_EQ(vector_index, Descriptor::kVector);
1367 #endif // DEBUG
1368  return call_builtin;
1369  }
compiler::FeedbackVectorRef feedback() const

References DCHECK_EQ, v8::internal::maglev::CallBuiltin::InputCountWithoutContext(), and v8::internal::maglev::CallBuiltin::set_feedback().

+ Here is the call graph for this function:

◆ BuildCallCPPBuiltin()

CallCPPBuiltin* v8::internal::maglev::MaglevGraphBuilder::BuildCallCPPBuiltin ( Builtin  builtin,
ValueNode target,
ValueNode new_target,
std::initializer_list< ValueNode * >  inputs 
)
inlineprivate

Definition at line 1371 of file maglev-graph-builder.h.

1373  {
1374  DCHECK(Builtins::IsCpp(builtin));
1375  const size_t input_count = inputs.size() + CallCPPBuiltin::kFixedInputCount;
1376  return AddNewNode<CallCPPBuiltin>(
1377  input_count,
1378  [&](CallCPPBuiltin* call_builtin) {
1379  int arg_index = 0;
1380  for (auto* input : inputs) {
1381  call_builtin->set_arg(arg_index++, input);
1382  }
1383  },
1384  builtin, GetTaggedValue(target), GetTaggedValue(new_target),
1386  }
static V8_EXPORT_PRIVATE bool IsCpp(Builtin builtin)
Definition: builtins.cc:505
static constexpr int kFixedInputCount
Definition: maglev-ir.h:10228

References v8::internal::DCHECK(), v8::internal::compiler::turboshaft::detail::input_count(), v8::internal::Builtins::IsCpp(), and v8::internal::maglev::CallCPPBuiltin::set_arg().

+ Here is the call graph for this function:

◆ BuildCallFromRegisterList()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCallFromRegisterList ( ConvertReceiverMode  receiver_mode)
private

Definition at line 11759 of file maglev-graph-builder.cc.

11760  {
11761  ValueNode* target = LoadRegister(0);
11762  interpreter::RegisterList reg_list = iterator_.GetRegisterListOperand(1);
11763  FeedbackSlot slot = GetSlotOperand(3);
11764  compiler::FeedbackSource feedback_source(feedback(), slot);
11765  CallArguments args(receiver_mode, reg_list, current_interpreter_frame_);
11766  return BuildCallWithFeedback(target, args, feedback_source);
11767 }
RegisterList GetRegisterListOperand(int operand_index) const
ValueNode * LoadRegister(int operand_index)
ReduceResult BuildCallWithFeedback(ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)
FeedbackSlot GetSlotOperand(int operand_index) const

References v8::base::args.

◆ BuildCallFromRegisters()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCallFromRegisters ( int  arg_count,
ConvertReceiverMode  receiver_mode 
)
private

Definition at line 11769 of file maglev-graph-builder.cc.

11770  {
11771  ValueNode* target = LoadRegister(0);
11772  const int receiver_count =
11773  (receiver_mode == ConvertReceiverMode::kNullOrUndefined) ? 0 : 1;
11774  const int reg_count = arg_count + receiver_count;
11775  FeedbackSlot slot = GetSlotOperand(reg_count + 1);
11776  compiler::FeedbackSource feedback_source(feedback(), slot);
11777  switch (reg_count) {
11778  case 0: {
11780  CallArguments args(receiver_mode);
11781  return BuildCallWithFeedback(target, args, feedback_source);
11782  }
11783  case 1: {
11784  CallArguments args(receiver_mode, {LoadRegister(1)});
11785  return BuildCallWithFeedback(target, args, feedback_source);
11786  }
11787  case 2: {
11788  CallArguments args(receiver_mode, {LoadRegister(1), LoadRegister(2)});
11789  return BuildCallWithFeedback(target, args, feedback_source);
11790  }
11791  case 3: {
11792  CallArguments args(receiver_mode,
11793  {LoadRegister(1), LoadRegister(2), LoadRegister(3)});
11794  return BuildCallWithFeedback(target, args, feedback_source);
11795  }
11796  default:
11797  UNREACHABLE();
11798  }
11799 }

References v8::base::args, DCHECK_EQ, v8::internal::kNullOrUndefined, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ BuildCallKnownJSFunction() [1/2]

CallKnownJSFunction * v8::internal::maglev::MaglevGraphBuilder::BuildCallKnownJSFunction ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
base::Vector< ValueNode * >  arguments 
)
private

Definition at line 11169 of file maglev-graph-builder.cc.

11175  {
11176  DCHECK_GT(arguments.size(), 0);
11177  constexpr int kSkipReceiver = 1;
11178  int argcount_without_receiver =
11179  static_cast<int>(arguments.size()) - kSkipReceiver;
11180  size_t input_count =
11181  argcount_without_receiver + CallKnownJSFunction::kFixedInputCount;
11182  return AddNewNode<CallKnownJSFunction>(
11183  input_count,
11184  [&](CallKnownJSFunction* call) {
11185  for (int i = 0; i < argcount_without_receiver; i++) {
11186  call->set_arg(i, GetTaggedValue(arguments[i + kSkipReceiver]));
11187  }
11188  },
11189 #ifdef V8_ENABLE_LEAPTIERING
11190  dispatch_handle,
11191 #endif
11192  shared, GetTaggedValue(function), GetTaggedValue(context),
11193  GetTaggedValue(arguments[0]), GetTaggedValue(new_target));
11194 }

References DCHECK_GT, v8::internal::compiler::turboshaft::detail::input_count(), v8::internal::maglev::CallKnownJSFunction::set_arg(), and v8::base::Vector< T >::size().

+ Here is the call graph for this function:

◆ BuildCallKnownJSFunction() [2/2]

CallKnownJSFunction * v8::internal::maglev::MaglevGraphBuilder::BuildCallKnownJSFunction ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
compiler::FeedbackCellRef  feedback_cell,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11145 of file maglev-graph-builder.cc.

11152  {
11153  ValueNode* receiver = GetConvertReceiver(shared, args);
11155  return AddNewNode<CallKnownJSFunction>(
11156  input_count,
11157  [&](CallKnownJSFunction* call) {
11158  for (int i = 0; i < static_cast<int>(args.count()); i++) {
11159  call->set_arg(i, GetTaggedValue(args[i]));
11160  }
11161  },
11162 #ifdef V8_ENABLE_LEAPTIERING
11163  dispatch_handle,
11164 #endif
11165  shared, GetTaggedValue(function), GetTaggedValue(context),
11166  GetTaggedValue(receiver), GetTaggedValue(new_target));
11167 }
ValueNode * GetConvertReceiver(compiler::SharedFunctionInfoRef shared, const CallArguments &args)

References v8::base::args, v8::internal::compiler::turboshaft::detail::input_count(), and v8::internal::maglev::CallKnownJSFunction::set_arg().

+ Here is the call graph for this function:

◆ BuildCallRuntime()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCallRuntime ( Runtime::FunctionId  function_id,
std::initializer_list< ValueNode * >  inputs 
)
inlineprivate

Definition at line 1404 of file maglev-graph-builder.h.

1405  {
1406  CallRuntime* result = AddNewNode<CallRuntime>(
1407  inputs.size() + CallRuntime::kFixedInputCount,
1408  [&](CallRuntime* call_runtime) {
1409  int arg_index = 0;
1410  for (auto* input : inputs) {
1411  call_runtime->set_arg(arg_index++, GetTaggedValue(input));
1412  }
1413  },
1414  function_id, GetContext());
1415 
1416  if (RuntimeFunctionCanThrow(function_id)) {
1417  return BuildAbort(AbortReason::kUnexpectedReturnFromThrow);
1418  }
1419  return result;
1420  }
static constexpr int kFixedInputCount
Definition: maglev-ir.h:10339
constexpr bool RuntimeFunctionCanThrow(Runtime::FunctionId function_id)
ReduceResult BuildAbort(AbortReason reason)

References v8::base::internal::result.

◆ BuildCallSelf()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildCallSelf ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
CallArguments args 
)
private

Definition at line 10950 of file maglev-graph-builder.cc.

10952  {
10953  ValueNode* receiver = GetConvertReceiver(shared, args);
10954  size_t input_count = args.count() + CallSelf::kFixedInputCount;
10955  graph()->set_has_recursive_calls(true);
10956  DCHECK_EQ(
10958  shared.internal_formal_parameter_count_with_receiver_deprecated());
10959  return AddNewNode<CallSelf>(
10960  input_count,
10961  [&](CallSelf* call) {
10962  for (int i = 0; i < static_cast<int>(args.count()); i++) {
10963  call->set_arg(i, GetTaggedValue(args[i]));
10964  }
10965  },
10967  GetTaggedValue(function), GetTaggedValue(context),
10968  GetTaggedValue(receiver), GetTaggedValue(new_target));
10969 }
static constexpr int kFixedInputCount
Definition: maglev-ir.h:10478
void set_has_recursive_calls(bool value)
Definition: maglev-graph.h:171
MaglevCompilationUnit * toplevel_compilation_unit() const

References v8::base::args, DCHECK_EQ, graph(), v8::internal::compiler::turboshaft::detail::input_count(), v8::internal::compiler::SharedFunctionInfoRef::internal_formal_parameter_count_with_receiver_deprecated(), and v8::internal::maglev::CallSelf::set_arg().

+ Here is the call graph for this function:

◆ BuildCallWithFeedback()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCallWithFeedback ( ValueNode target_node,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11502 of file maglev-graph-builder.cc.

11504  {
11505  const compiler::ProcessedFeedback& processed_feedback =
11506  broker()->GetFeedbackForCall(feedback_source);
11507  if (processed_feedback.IsInsufficient()) {
11508  return EmitUnconditionalDeopt(
11509  DeoptimizeReason::kInsufficientTypeFeedbackForCall);
11510  }
11511 
11512  DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
11513  const compiler::CallFeedback& call_feedback = processed_feedback.AsCall();
11514 
11515  if (call_feedback.target().has_value()) {
11516  if (call_feedback.target()->IsJSFunction()) {
11517  CallFeedbackContent content = call_feedback.call_feedback_content();
11518  compiler::JSFunctionRef feedback_target =
11519  call_feedback.target()->AsJSFunction();
11520  if (content == CallFeedbackContent::kReceiver) {
11521  compiler::NativeContextRef native_context =
11523  compiler::JSFunctionRef apply_function =
11524  native_context.function_prototype_apply(broker());
11526  target_node, apply_function, DeoptimizeReason::kWrongCallTarget));
11529  feedback_target, args, feedback_source),
11530  SetAccumulator);
11531  feedback_target = apply_function;
11532  } else {
11534  }
11536  target_node, feedback_target, DeoptimizeReason::kWrongCallTarget));
11537  } else if (call_feedback.target()->IsFeedbackCell() &&
11538  args.mode() == CallArguments::kDefault) {
11539  compiler::FeedbackCellRef feedback_cell =
11540  call_feedback.target()->AsFeedbackCell();
11541  compiler::OptionalSharedFunctionInfoRef shared =
11542  feedback_cell.shared_function_info(broker());
11543  if (shared.has_value() && !shared->HasBreakInfo(broker())) {
11544  RETURN_IF_ABORT(BuildCheckJSFunction(target_node));
11545  ValueNode* target_feedback_cell =
11546  BuildLoadJSFunctionFeedbackCell(target_node);
11548  BuildCheckValueByReference(target_feedback_cell, feedback_cell,
11549  DeoptimizeReason::kWrongFeedbackCell));
11550  if (IsClassConstructor(shared->kind())) {
11551  // If we have a class constructor, we should raise an exception.
11552  return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
11553  {target_node});
11554  }
11555  ValueNode* context = BuildLoadJSFunctionContext(target_node);
11556  compiler::ScopeInfoRef scope_info = shared->scope_info(broker());
11557  if (scope_info.HasOuterScopeInfo()) {
11558  scope_info = scope_info.OuterScopeInfo(broker());
11559  CHECK(scope_info.HasContext());
11560  graph()->record_scope_info(context, scope_info);
11561  }
11564  context, target_node,
11565  GetRootConstant(RootIndex::kUndefinedValue),
11566 #ifdef V8_ENABLE_LEAPTIERING
11567  feedback_cell.dispatch_handle(),
11568 #endif
11569  shared.value(), feedback_cell, args, feedback_source),
11570  SetAccumulator);
11571  UNREACHABLE();
11572  }
11573  }
11574  }
11575  PROCESS_AND_RETURN_IF_DONE(ReduceCall(target_node, args, feedback_source),
11576  SetAccumulator);
11577  UNREACHABLE();
11578 }
ProcessedFeedback const & GetFeedbackForCall(FeedbackSource const &source)
MaybeReduceResult TryBuildCallKnownJSFunction(compiler::JSFunctionRef function, ValueNode *new_target, CallArguments &args, const compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryReduceFunctionPrototypeApplyCallWithReceiver(compiler::OptionalHeapObjectRef maybe_receiver, CallArguments &args, const compiler::FeedbackSource &feedback_source)
ReduceResult ReduceCall(ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
ValueNode * BuildLoadJSFunctionFeedbackCell(ValueNode *closure)
ValueNode * BuildLoadJSFunctionContext(ValueNode *closure)
ReduceResult EmitUnconditionalDeopt(DeoptimizeReason reason)
ReduceResult BuildCheckValueByReference(ValueNode *node, compiler::HeapObjectRef ref, DeoptimizeReason reason)
ReduceResult BuildCallRuntime(Runtime::FunctionId function_id, std::initializer_list< ValueNode * > inputs)
ReduceResult BuildCheckJSFunction(ValueNode *object)
#define RETURN_IF_ABORT(result)
#define PROCESS_AND_RETURN_IF_DONE(result, value_processor)
bool IsClassConstructor(FunctionKind kind)
!IsContextMap !IsContextMap native_context
Definition: map-inl.h:896

References v8::base::args, v8::internal::compiler::ProcessedFeedback::AsCall(), broker(), v8::internal::compiler::CallFeedback::call_feedback_content(), CHECK, DCHECK_EQ, graph(), v8::internal::compiler::ScopeInfoRef::HasContext(), v8::internal::compiler::ScopeInfoRef::HasOuterScopeInfo(), v8::internal::IsClassConstructor(), v8::internal::compiler::ProcessedFeedback::IsInsufficient(), v8::internal::compiler::ProcessedFeedback::kCall, v8::internal::compiler::ProcessedFeedback::kind(), v8::internal::kReceiver, v8::internal::kTarget, v8::internal::native_context, v8::internal::compiler::ScopeInfoRef::OuterScopeInfo(), PROCESS_AND_RETURN_IF_DONE, RETURN_IF_ABORT, v8::internal::compiler::FeedbackCellRef::shared_function_info(), v8::internal::compiler::CallFeedback::target(), v8::internal::UNREACHABLE(), and v8::internal::compiler::FeedbackCellRef::value().

+ Here is the call graph for this function:

◆ BuildCheckHeapObject()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckHeapObject ( ValueNode object)
private

Definition at line 4591 of file maglev-graph-builder.cc.

4591  {
4592  // Check for the empty type first so that we catch the case where
4593  // GetType(object) is already empty.
 4594  if (IsEmptyNodeType(
 4595  IntersectType(GetType(object), NodeType::kAnyHeapObject))) {
 4596  return EmitUnconditionalDeopt(DeoptimizeReason::kSmi);
 4597  }
4598  if (EnsureType(object, NodeType::kAnyHeapObject)) return ReduceResult::Done();
4599  AddNewNode<CheckHeapObject>({object});
4600  return ReduceResult::Done();
4601 }
bool EnsureType(ValueNode *node, NodeType type, NodeType *old=nullptr)
constexpr bool IsEmptyNodeType(NodeType type)
Definition: maglev-ir.h:874
constexpr NodeType IntersectType(NodeType left, NodeType right)
Definition: maglev-ir.h:731

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), GetType(), v8::internal::maglev::IntersectType(), v8::internal::maglev::IsEmptyNodeType(), and v8::internal::compiler::kSmi.

Referenced by TryBuildPropertyCellStore(), and TryBuildStoreField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckInternalizedStringValueOrByReference()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckInternalizedStringValueOrByReference ( ValueNode node,
compiler::HeapObjectRef  ref,
DeoptimizeReason  reason 
)
private

Definition at line 11247 of file maglev-graph-builder.cc.

11248  {
11249  if (!IsConstantNode(node->opcode()) && ref.IsInternalizedString()) {
11250  if (!IsInstanceOfNodeType(ref.map(broker()), GetType(node), broker())) {
11251  return EmitUnconditionalDeopt(reason);
11252  }
11253  AddNewNode<CheckValueEqualsString>({node}, ref.AsInternalizedString(),
11254  reason);
11255  SetKnownValue(node, ref, NodeType::kString);
11256  return ReduceResult::Done();
11257  }
11258  return BuildCheckValueByReference(node, ref, reason);
11259 }
void SetKnownValue(ValueNode *node, compiler::ObjectRef constant, NodeType new_node_type)
bool IsInstanceOfNodeType(compiler::MapRef map, NodeType type, compiler::JSHeapBroker *broker)
Definition: maglev-ir.h:946

References broker(), v8::internal::maglev::IsConstantNode(), v8::internal::maglev::IsInstanceOfNodeType(), v8::internal::compiler::HeapObjectRef::map(), and v8::internal::maglev::NodeBase::opcode().

+ Here is the call graph for this function:

◆ BuildCheckJSFunction()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckJSFunction ( ValueNode object)
private

Definition at line 4686 of file maglev-graph-builder.cc.

4686  {
4687  NodeType known_type;
4688  // Check for the empty type first so that we catch the case where
4689  // GetType(object) is already empty.
4690  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kJSFunction))) {
4691  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongInstanceType);
4692  }
4693  if (EnsureType(object, NodeType::kJSFunction, &known_type))
4694  return ReduceResult::Done();
4695  AddNewNode<CheckInstanceType>({object}, GetCheckType(known_type),
4696  FIRST_JS_FUNCTION_TYPE, LAST_JS_FUNCTION_TYPE);
4697  return ReduceResult::Done();
4698 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

+ Here is the call graph for this function:

◆ BuildCheckJSReceiver()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckJSReceiver ( ValueNode object)
private

Definition at line 4700 of file maglev-graph-builder.cc.

4700  {
4701  NodeType known_type;
4702  // Check for the empty type first so that we catch the case where
4703  // GetType(object) is already empty.
4704  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kJSReceiver))) {
4705  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongInstanceType);
4706  }
4707  if (EnsureType(object, NodeType::kJSReceiver, &known_type))
4708  return ReduceResult::Done();
4709  AddNewNode<CheckInstanceType>({object}, GetCheckType(known_type),
4710  FIRST_JS_RECEIVER_TYPE, LAST_JS_RECEIVER_TYPE);
4711  return ReduceResult::Done();
4712 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckJSReceiverOrNullOrUndefined()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckJSReceiverOrNullOrUndefined ( ValueNode object)
private

Definition at line 4714 of file maglev-graph-builder.cc.

4715  {
4716  NodeType known_type;
4717  // Check for the empty type first so that we catch the case where
4718  // GetType(object) is already empty.
 4719  if (IsEmptyNodeType(IntersectType(GetType(object),
 4720  NodeType::kJSReceiverOrNullOrUndefined))) {
4721  return EmitUnconditionalDeopt(
4722  DeoptimizeReason::kNotAJavaScriptObjectOrNullOrUndefined);
4723  }
4724  if (EnsureType(object, NodeType::kJSReceiverOrNullOrUndefined, &known_type)) {
4725  return ReduceResult::Done();
4726  }
4727  AddNewNode<CheckJSReceiverOrNullOrUndefined>({object},
4728  GetCheckType(known_type));
4729  return ReduceResult::Done();
4730 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckMaps()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckMaps ( ValueNode object,
base::Vector< const compiler::MapRef maps,
std::optional< ValueNode * >  map = {},
bool  has_deprecated_map_without_migration_target = false,
bool  migration_done_outside = false 
)
private

Definition at line 4858 of file maglev-graph-builder.cc.

4862  {
4863  // TODO(verwaest): Support other objects with possible known stable maps as
4864  // well.
4865  if (compiler::OptionalHeapObjectRef constant = TryGetConstant(object)) {
4866  // For constants with stable maps that match one of the desired maps, we
4867  // don't need to emit a map check, and can use the dependency -- we
4868  // can't do this for unstable maps because the constant could migrate
4869  // during compilation.
4870  compiler::MapRef constant_map = constant.value().map(broker());
4871  if (std::find(maps.begin(), maps.end(), constant_map) != maps.end()) {
4872  if (constant_map.is_stable()) {
4873  broker()->dependencies()->DependOnStableMap(constant_map);
4874  return ReduceResult::Done();
4875  }
4876  // TODO(verwaest): Reduce maps to the constant map.
4877  } else {
4878  // TODO(leszeks): Insert an unconditional deopt if the constant map
4879  // doesn't match the required map.
4880  }
4881  }
4882 
4883  NodeInfo* known_info = GetOrCreateInfoFor(object);
4884 
4885  // Calculates if known maps are a subset of maps, their map intersection and
4886  // whether we should emit check with migration.
4887  KnownMapsMerger merger(broker(), zone(), maps);
4888  merger.IntersectWithKnownNodeAspects(object, known_node_aspects());
4889 
4890  if (IsEmptyNodeType(IntersectType(merger.node_type(), GetType(object)))) {
4891  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
4892  }
4893 
4894  // If the known maps are the subset of the maps to check, we are done.
4895  if (merger.known_maps_are_subset_of_requested_maps()) {
4896  // The node type of known_info can get out of sync with the possible maps.
4897  // For instance after merging with an effectively dead branch (i.e., check
4898  // contradicting all possible maps).
4899  // TODO(olivf) Try to combine node_info and possible maps and ensure that
4900  // narrowing the type also clears impossible possible_maps.
4901  if (!NodeTypeIs(known_info->type(), merger.node_type())) {
4902  known_info->UnionType(merger.node_type());
4903  }
4904 #ifdef DEBUG
4905  // Double check that, for every possible map, it's one of the maps we'd
4906  // want to check.
4907  for (compiler::MapRef possible_map :
4908  known_node_aspects().TryGetInfoFor(object)->possible_maps()) {
4909  DCHECK_NE(std::find(maps.begin(), maps.end(), possible_map), maps.end());
4910  }
4911 #endif
4912  return ReduceResult::Done();
4913  }
4914 
4915  if (merger.intersect_set().is_empty()) {
4916  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
4917  }
4918 
4919  // TODO(v8:7700): Check if the {maps} - {known_maps} size is smaller than
4920  // {maps} \intersect {known_maps}, we can emit CheckNotMaps instead.
4921 
4922  // Emit checks.
4923  if (merger.emit_check_with_migration() && !migration_done_outside) {
4924  AddNewNode<CheckMapsWithMigration>({object}, merger.intersect_set(),
4925  GetCheckType(known_info->type()));
4926  } else if (has_deprecated_map_without_migration_target &&
4927  !migration_done_outside) {
4928  AddNewNode<CheckMapsWithMigrationAndDeopt>(
4929  {object}, merger.intersect_set(), GetCheckType(known_info->type()));
4930  } else if (map) {
4931  AddNewNode<CheckMapsWithAlreadyLoadedMap>({object, *map},
4932  merger.intersect_set());
4933  } else {
4934  AddNewNode<CheckMaps>({object}, merger.intersect_set(),
4935  GetCheckType(known_info->type()));
4936  }
4937 
4938  merger.UpdateKnownNodeAspects(object, known_node_aspects());
4939  return ReduceResult::Done();
4940 }
CompilationDependencies * dependencies() const
NodeInfo * GetOrCreateInfoFor(ValueNode *node)
#define DCHECK_NE(v1, v2)
Definition: logging.h:485

References v8::base::Vector< T >::begin(), broker(), DCHECK_NE, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), v8::base::Vector< T >::end(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetOrCreateInfoFor(), GetType(), v8::internal::maglev::IntersectType(), v8::internal::compiler::MapRef::is_stable(), v8::internal::maglev::IsEmptyNodeType(), known_node_aspects(), v8::internal::maglev::NodeTypeIs(), TryGetConstant(), v8::internal::maglev::NodeInfo::type(), v8::internal::maglev::NodeInfo::UnionType(), and zone().

Referenced by TryBuildNamedAccess(), TryBuildPropertyCellStore(), and TryBuildStoreField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckNotHole()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckNotHole ( ValueNode node)
private

Definition at line 11325 of file maglev-graph-builder.cc.

11325  {
11326  if (!node->is_tagged()) return ReduceResult::Done();
11327  compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
11328  if (maybe_constant) {
11329  if (maybe_constant.value().IsTheHole()) {
11330  return EmitUnconditionalDeopt(DeoptimizeReason::kHole);
11331  }
11332  return ReduceResult::Done();
11333  }
11334  AddNewNode<CheckNotHole>({node});
11335  return ReduceResult::Done();
11336 }

References v8::internal::maglev::ValueNode::is_tagged(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildCheckNumber()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckNumber ( ValueNode object)
private

Definition at line 4662 of file maglev-graph-builder.cc.

4662  {
4663  // Check for the empty type first so that we catch the case where
4664  // GetType(object) is already empty.
4665  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kNumber))) {
4666  return EmitUnconditionalDeopt(DeoptimizeReason::kNotANumber);
4667  }
4668  if (EnsureType(object, NodeType::kNumber)) return ReduceResult::Done();
4669  AddNewNode<CheckNumber>({object}, Object::Conversion::kToNumber);
4670  return ReduceResult::Done();
4671 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), GetType(), v8::internal::maglev::IntersectType(), v8::internal::maglev::IsEmptyNodeType(), and v8::internal::Object::kToNumber.

Referenced by TryBuildNamedAccess(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckNumericalValue()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckNumericalValue ( ValueNode node,
compiler::ObjectRef  ref,
DeoptimizeReason  reason 
)
private

Definition at line 11261 of file maglev-graph-builder.cc.

11262  {
11263  DCHECK(ref.IsSmi() || ref.IsHeapNumber());
11264  if (ref.IsSmi()) {
11265  int ref_value = ref.AsSmi();
11266  if (IsConstantNode(node->opcode())) {
11267  if (node->Is<SmiConstant>() &&
11268  node->Cast<SmiConstant>()->value().value() == ref_value) {
11269  return ReduceResult::Done();
11270  }
11271  if (node->Is<Int32Constant>() &&
11272  node->Cast<Int32Constant>()->value() == ref_value) {
11273  return ReduceResult::Done();
11274  }
11275  return EmitUnconditionalDeopt(reason);
11276  }
11277  if (NodeTypeIs(GetType(node), NodeType::kAnyHeapObject)) {
11278  return EmitUnconditionalDeopt(reason);
11279  }
11280  AddNewNode<CheckValueEqualsInt32>({node}, ref_value, reason);
11281  } else {
11282  DCHECK(ref.IsHeapNumber());
11283  Float64 ref_value = Float64::FromBits(ref.AsHeapNumber().value_as_bits());
11284  DCHECK(!ref_value.is_hole_nan());
11285  if (node->Is<Float64Constant>()) {
11286  Float64 f64 = node->Cast<Float64Constant>()->value();
11287  DCHECK(!f64.is_hole_nan());
11288  if (f64 == ref_value) {
11289  return ReduceResult::Done();
11290  }
11291  return EmitUnconditionalDeopt(reason);
11292  } else if (compiler::OptionalHeapObjectRef constant =
11293  TryGetConstant(node)) {
11294  if (constant.value().IsHeapNumber()) {
11295  Float64 f64 =
11296  Float64::FromBits(constant.value().AsHeapNumber().value_as_bits());
11297  DCHECK(!f64.is_hole_nan());
11298  if (f64 == ref_value) {
11299  return ReduceResult::Done();
11300  }
11301  }
11302  return EmitUnconditionalDeopt(reason);
11303  }
11304  if (!NodeTypeIs(NodeType::kNumber, GetType(node))) {
11305  return EmitUnconditionalDeopt(reason);
11306  }
11307  AddNewNode<CheckFloat64SameValue>({node}, ref_value, reason);
11308  }
11309 
11310  SetKnownValue(node, ref, NodeType::kNumber);
11311  return ReduceResult::Done();
11312 }
static constexpr Float64 FromBits(uint64_t bits)
Definition: boxed-float.h:124
FloatWithBits< 64 > Float64
Definition: index.h:240

References v8::internal::maglev::NodeBase::Cast(), v8::internal::DCHECK(), v8::internal::Float64::FromBits(), v8::internal::maglev::NodeBase::Is(), v8::internal::Float64::is_hole_nan(), v8::internal::maglev::IsConstantNode(), v8::internal::maglev::NodeTypeIs(), v8::internal::maglev::NodeBase::opcode(), v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), v8::internal::maglev::Int32Constant::value(), v8::internal::maglev::SmiConstant::value(), v8::internal::value, and v8::internal::Tagged< Smi >::value().

+ Here is the call graph for this function:

◆ BuildCheckNumericalValueOrByReference()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckNumericalValueOrByReference ( ValueNode node,
compiler::ObjectRef  ref,
DeoptimizeReason  reason 
)
private

Definition at line 11239 of file maglev-graph-builder.cc.

11240  {
11241  if (ref.IsHeapObject() && !ref.IsHeapNumber()) {
11242  return BuildCheckValueByReference(node, ref.AsHeapObject(), reason);
11243  }
11244  return BuildCheckNumericalValue(node, ref, reason);
11245 }
ReduceResult BuildCheckNumericalValue(ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)

Referenced by TryBuildPropertyCellStore(), and TrySpecializeStoreContextSlot().

+ Here is the caller graph for this function:

◆ BuildCheckSeqOneByteString()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckSeqOneByteString ( ValueNode object)
private

Definition at line 4603 of file maglev-graph-builder.cc.

4603  {
4604  NodeType known_type;
4605  // Check for the empty type first so that we catch the case where
4606  // GetType(object) is already empty.
4607  if (IsEmptyNodeType(
4608  IntersectType(GetType(object), NodeType::kSeqOneByteString))) {
4609  return EmitUnconditionalDeopt(DeoptimizeReason::kNotASeqOneByteString);
4610  }
4611  if (EnsureType(object, NodeType::kSeqOneByteString, &known_type)) {
4612  return ReduceResult::Done();
4613  }
4614  AddNewNode<CheckSeqOneByteString>({object}, GetCheckType(known_type));
4615  return ReduceResult::Done();
4616 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by TryBuildNamedAccess().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckSmi()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckSmi ( ValueNode object,
bool  elidable = true 
)
private

Definition at line 4559 of file maglev-graph-builder.cc.

4560  {
4561  if (object->StaticTypeIs(broker(), NodeType::kSmi)) return object;
4562  // Check for the empty type first so that we catch the case where
4563  // GetType(object) is already empty.
 4564  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kSmi))) {
 4565  return EmitUnconditionalDeopt(DeoptimizeReason::kSmi);
 4566  }
4567  if (EnsureType(object, NodeType::kSmi) && elidable) return object;
 4568  switch (object->value_representation()) {
 4569  case ValueRepresentation::kInt32:
 4570  if (!SmiValuesAre32Bits()) {
 4571  AddNewNode<CheckInt32IsSmi>({object});
 4572  }
 4573  break;
 4574  case ValueRepresentation::kUint32:
 4575  AddNewNode<CheckUint32IsSmi>({object});
 4576  break;
 4577  case ValueRepresentation::kFloat64:
 4578  case ValueRepresentation::kHoleyFloat64:
 4579  AddNewNode<CheckHoleyFloat64IsSmi>({object});
 4580  break;
 4581  case ValueRepresentation::kTagged:
 4582  AddNewNode<CheckSmi>({object});
 4583  break;
 4584  case ValueRepresentation::kIntPtr:
 4585  AddNewNode<CheckIntPtrIsSmi>({object});
 4586  break;
 4587  }
4588  return object;
4589 }
constexpr bool SmiValuesAre32Bits()
Definition: v8-internal.h:209

References broker(), EmitUnconditionalDeopt(), EnsureType(), GetType(), v8::internal::maglev::IntersectType(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, v8::internal::SmiValuesAre32Bits(), v8::internal::maglev::ValueNode::StaticTypeIs(), and v8::internal::maglev::ValueNode::value_representation().

Referenced by GetSmiValue(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckString()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckString ( ValueNode object)
private

Definition at line 4618 of file maglev-graph-builder.cc.

4618  {
4619  NodeType known_type;
4620  // Check for the empty type first so that we catch the case where
4621  // GetType(object) is already empty.
4622  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kString))) {
4623  return EmitUnconditionalDeopt(DeoptimizeReason::kNotAString);
4624  }
4625  if (EnsureType(object, NodeType::kString, &known_type)) {
4626  return ReduceResult::Done();
4627  }
4628  AddNewNode<CheckString>({object}, GetCheckType(known_type));
4629  return ReduceResult::Done();
4630 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by BuildStringConcat(), TryBuildNamedAccess(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckStringOrOddball()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckStringOrOddball ( ValueNode object)
private

Definition at line 4647 of file maglev-graph-builder.cc.

4647  {
4648  NodeType known_type;
4649  // Check for the empty type first so that we catch the case where
4650  // GetType(object) is already empty.
4651  if (IsEmptyNodeType(
4652  IntersectType(GetType(object), NodeType::kStringOrOddball))) {
4653  return EmitUnconditionalDeopt(DeoptimizeReason::kNotAStringOrOddball);
4654  }
4655  if (EnsureType(object, NodeType::kStringOrOddball, &known_type)) {
4656  return ReduceResult::Done();
4657  }
4658  AddNewNode<CheckStringOrOddball>({object}, GetCheckType(known_type));
4659  return ReduceResult::Done();
4660 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckStringOrStringWrapper()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckStringOrStringWrapper ( ValueNode object)
private

Definition at line 4632 of file maglev-graph-builder.cc.

4633  {
4634  NodeType known_type;
4635  // Check for the empty type first so that we catch the case where
4636  // GetType(object) is already empty.
4637  if (IsEmptyNodeType(
4638  IntersectType(GetType(object), NodeType::kStringOrStringWrapper))) {
4639  return EmitUnconditionalDeopt(DeoptimizeReason::kNotAStringOrStringWrapper);
4640  }
4641  if (EnsureType(object, NodeType::kStringOrStringWrapper, &known_type))
4642  return ReduceResult::Done();
4643  AddNewNode<CheckStringOrStringWrapper>({object}, GetCheckType(known_type));
4644  return ReduceResult::Done();
4645 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by VisitBinaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckSymbol()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckSymbol ( ValueNode object)
private

Definition at line 4673 of file maglev-graph-builder.cc.

4673  {
4674  NodeType known_type;
4675  // Check for the empty type first so that we catch the case where
4676  // GetType(object) is already empty.
4677  if (IsEmptyNodeType(IntersectType(GetType(object), NodeType::kSymbol))) {
4678  return EmitUnconditionalDeopt(DeoptimizeReason::kNotASymbol);
4679  }
4680  if (EnsureType(object, NodeType::kSymbol, &known_type))
4681  return ReduceResult::Done();
4682  AddNewNode<CheckSymbol>({object}, GetCheckType(known_type));
4683  return ReduceResult::Done();
4684 }

References v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetType(), v8::internal::maglev::IntersectType(), and v8::internal::maglev::IsEmptyNodeType().

Referenced by VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildCheckValueByReference()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCheckValueByReference ( ValueNode node,
compiler::HeapObjectRef  ref,
DeoptimizeReason  reason 
)
private

Definition at line 11219 of file maglev-graph-builder.cc.

11220  {
11221  DCHECK(!ref.IsSmi());
11222  DCHECK(!ref.IsHeapNumber());
11223 
11224  if (!IsInstanceOfNodeType(ref.map(broker()), GetType(node), broker())) {
11225  return EmitUnconditionalDeopt(reason);
11226  }
11227  if (compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node)) {
11228  if (maybe_constant.value().equals(ref)) {
11229  return ReduceResult::Done();
11230  }
11231  return EmitUnconditionalDeopt(reason);
11232  }
11233  AddNewNode<CheckValue>({node}, ref, reason);
11234  SetKnownValue(node, ref, StaticTypeForConstant(broker(), ref));
11235 
11236  return ReduceResult::Done();
11237 }
NodeType StaticTypeForConstant(compiler::JSHeapBroker *broker, compiler::ObjectRef ref)
Definition: maglev-ir.h:879

References broker(), v8::internal::DCHECK(), v8::internal::maglev::IsInstanceOfNodeType(), v8::internal::compiler::HeapObjectRef::map(), v8::internal::maglev::StaticTypeForConstant(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildCompareMaps()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildCompareMaps ( ValueNode heap_object,
ValueNode object_map,
base::Vector< const compiler::MapRef maps,
MaglevSubGraphBuilder sub_graph,
std::optional< MaglevSubGraphBuilder::Label > &  if_not_matched 
)
private

Definition at line 4974 of file maglev-graph-builder.cc.

4977  {
4978  GetOrCreateInfoFor(heap_object);
4979  KnownMapsMerger merger(broker(), zone(), maps);
4980  merger.IntersectWithKnownNodeAspects(heap_object, known_node_aspects());
4981 
4982  if (merger.intersect_set().is_empty()) {
4983  return ReduceResult::DoneWithAbort();
4984  }
4985 
4986  // TODO(pthier): Support map packing.
 4987  DCHECK(!V8_MAP_PACKING_BOOL);
 4988 
4989  // TODO(pthier): Handle map migrations.
4990  std::optional<MaglevSubGraphBuilder::Label> map_matched;
4991  const compiler::ZoneRefSet<Map>& relevant_maps = merger.intersect_set();
4992  if (relevant_maps.size() > 1) {
4993  map_matched.emplace(sub_graph, static_cast<int>(relevant_maps.size()));
4994  for (size_t map_index = 1; map_index < relevant_maps.size(); map_index++) {
4995  sub_graph->GotoIfTrue<BranchIfReferenceEqual>(
4996  &*map_matched,
4997  {object_map, GetConstant(relevant_maps.at(map_index))});
4998  }
4999  }
5000  if_not_matched.emplace(sub_graph, 1);
5001  sub_graph->GotoIfFalse<BranchIfReferenceEqual>(
5002  &*if_not_matched, {object_map, GetConstant(relevant_maps.at(0))});
5003  if (map_matched.has_value()) {
5004  sub_graph->Goto(&*map_matched);
5005  sub_graph->Bind(&*map_matched);
5006  }
5007  merger.UpdateKnownNodeAspects(heap_object, known_node_aspects());
5008  return ReduceResult::Done();
5009 }
ValueNode * GetConstant(compiler::ObjectRef ref)
#define V8_MAP_PACKING_BOOL
Definition: globals.h:95

References v8::internal::ZoneCompactSet< T >::at(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), broker(), v8::internal::DCHECK(), v8::internal::maglev::ReduceResult::Done(), v8::internal::maglev::ReduceResult::DoneWithAbort(), GetConstant(), GetOrCreateInfoFor(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfFalse(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfTrue(), known_node_aspects(), v8::internal::ZoneCompactSet< T >::size(), V8_MAP_PACKING_BOOL, and zone().

+ Here is the call graph for this function:

◆ BuildConstruct()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildConstruct ( ValueNode target,
ValueNode new_target,
CallArguments args,
compiler::FeedbackSource feedback_source 
)
private

Definition at line 12447 of file maglev-graph-builder.cc.

12449  {
12450  compiler::ProcessedFeedback const& processed_feedback =
12451  broker()->GetFeedbackForCall(feedback_source);
12452  if (processed_feedback.IsInsufficient()) {
12453  return EmitUnconditionalDeopt(
12454  DeoptimizeReason::kInsufficientTypeFeedbackForConstruct);
12455  }
12456 
12457  DCHECK_EQ(processed_feedback.kind(), compiler::ProcessedFeedback::kCall);
12458  compiler::OptionalHeapObjectRef feedback_target =
12459  processed_feedback.AsCall().target();
12460  if (feedback_target.has_value() && feedback_target->IsAllocationSite()) {
12461  // The feedback is an AllocationSite, which means we have called the
12462  // Array function and collected transition (and pretenuring) feedback
12463  // for the resulting arrays.
12464  compiler::JSFunctionRef array_function =
12465  broker()->target_native_context().array_function(broker());
12466  RETURN_IF_ABORT(BuildCheckValueByReference(
12467  target, array_function, DeoptimizeReason::kWrongConstructor));
12468  PROCESS_AND_RETURN_IF_DONE(
12469  TryReduceConstructArrayConstructor(array_function, args,
12470  feedback_target->AsAllocationSite()),
12471  SetAccumulator);
12472  } else {
12473  if (feedback_target.has_value()) {
12474  PROCESS_AND_RETURN_IF_DONE(
12475  TryReduceConstruct(feedback_target.value(), target, new_target, args,
12476  feedback_source),
12477  SetAccumulator);
12478  }
12479  if (compiler::OptionalHeapObjectRef maybe_constant =
12480  TryGetConstant(target)) {
12481  PROCESS_AND_RETURN_IF_DONE(
12482  TryReduceConstruct(maybe_constant.value(), target, new_target, args,
12483  feedback_source),
12484  SetAccumulator);
12485  }
12486  }
12487  ValueNode* context = GetContext();
12488  SetAccumulator(BuildGenericConstruct(target, new_target, context, args,
12489  feedback_source));
12490  return ReduceResult::Done();
12491 }
ValueNode * BuildGenericConstruct(ValueNode *target, ValueNode *new_target, ValueNode *context, const CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
MaybeReduceResult TryReduceConstructArrayConstructor(compiler::JSFunctionRef array_function, CallArguments &args, compiler::OptionalAllocationSiteRef maybe_allocation_site={})
MaybeReduceResult TryReduceConstruct(compiler::HeapObjectRef feedback_target, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)

References v8::base::args, v8::internal::compiler::ProcessedFeedback::AsCall(), broker(), DCHECK_EQ, v8::internal::compiler::ProcessedFeedback::IsInsufficient(), v8::internal::compiler::ProcessedFeedback::kCall, v8::internal::compiler::ProcessedFeedback::kind(), PROCESS_AND_RETURN_IF_DONE, RETURN_IF_ABORT, v8::internal::compiler::CallFeedback::target(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildContinuationForPolymorphicPropertyLoad()

void v8::internal::maglev::MaglevGraphBuilder::BuildContinuationForPolymorphicPropertyLoad ( const ContinuationOffsets offsets)
private

Definition at line 7343 of file maglev-graph-builder.cc.

7344  {
7345  while (iterator_.current_offset() < continuation.last_continuation) {
7346  iterator_.Advance();
7348  }
7349 }

References v8::internal::maglev::MaglevGraphBuilder::ContinuationOffsets::last_continuation.

◆ BuildConvertHoleToUndefined()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildConvertHoleToUndefined ( ValueNode node)
private

Definition at line 11314 of file maglev-graph-builder.cc.

11314  {
11315  if (!node->is_tagged()) return node;
11316  compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(node);
11317  if (maybe_constant) {
11318  return maybe_constant.value().IsTheHole()
11319  ? GetRootConstant(RootIndex::kUndefinedValue)
11320  : node;
11321  }
11322  return AddNewNode<ConvertHoleToUndefined>({node});
11323 }

References v8::internal::maglev::ValueNode::is_tagged(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildEagerInlineCall()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildEagerInlineCall ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
compiler::FeedbackCellRef  feedback_cell,
CallArguments args,
float  call_frequency 
)
private

Definition at line 8579 of file maglev-graph-builder.cc.

8583  {
8585 
8586  // Merge catch block state if needed.
8587  CatchBlockDetails catch_block_details = GetCurrentTryCatchBlock();
8588  if (catch_block_details.ref &&
8589  catch_block_details.exception_handler_was_used) {
8590  if (IsInsideTryBlock()) {
8591  // Merge the current state into the handler state.
8593  this, compilation_unit_,
8596  }
8597  catch_block_details.deopt_frame_distance++;
8598  }
8599 
8600  // Create a new compilation unit.
8601  MaglevCompilationUnit* inner_unit = MaglevCompilationUnit::NewInner(
8602  zone(), compilation_unit_, shared, feedback_cell);
8603 
8604  // Propagate details.
8605  auto arguments_vector = GetArgumentsAsArrayOfValueNodes(shared, args);
8606  DeoptFrame* deopt_frame =
8607  GetDeoptFrameForEagerCall(inner_unit, function, arguments_vector);
8608  MaglevCallerDetails* caller_details = zone()->New<MaglevCallerDetails>(
8609  arguments_vector, deopt_frame,
8611  unobserved_context_slot_stores_, catch_block_details, IsInsideLoop(),
8612  /* is_eager_inline */ true, call_frequency);
8613 
8614  // Create a new graph builder for the inlined function.
8615  MaglevGraphBuilder inner_graph_builder(local_isolate_, inner_unit, graph_,
8616  caller_details);
8617 
8618  // Set the inner graph builder to build in the current block.
8619  inner_graph_builder.current_block_ = current_block_;
8620 
8621  // Build inline function.
8622  ReduceResult result = inner_graph_builder.BuildInlineFunction(
8623  current_source_position_, context, function, new_target);
8624 
 8625  // Propagate back (or reset) builder state.
8627  inner_graph_builder.unobserved_context_slot_stores_;
8630 
8631  if (result.IsDoneWithAbort()) {
8632  DCHECK_NULL(inner_graph_builder.current_block_);
8633  current_block_ = nullptr;
8634  return ReduceResult::DoneWithAbort();
8635  }
8636 
8637  // Propagate frame information back to the caller.
8639  inner_graph_builder.current_interpreter_frame_.known_node_aspects());
8641  inner_graph_builder.current_interpreter_frame_.virtual_objects());
8643  inner_graph_builder.current_for_in_state.receiver_needs_map_check;
8644 
8645  // Resume execution using the final block of the inner builder.
8646  current_block_ = inner_graph_builder.current_block_;
8647 
8648  DCHECK(result.IsDoneWithValue());
8649  return result;
8650 }
void set_known_node_aspects(KnownNodeAspects *known_node_aspects)
void set_virtual_objects(const VirtualObjectList &virtual_objects)
static MaglevCompilationUnit * NewInner(Zone *zone, const MaglevCompilationUnit *caller, compiler::SharedFunctionInfoRef shared_function_info, compiler::FeedbackCellRef feedback_cell)
DeoptFrame * GetDeoptFrameForEagerCall(const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
base::Vector< ValueNode * > GetArgumentsAsArrayOfValueNodes(compiler::SharedFunctionInfoRef shared, const CallArguments &args)
MaglevGraphBuilder(LocalIsolate *local_isolate, MaglevCompilationUnit *compilation_unit, Graph *graph, MaglevCallerDetails *caller_details=nullptr)
std::optional< DeoptFrame > latest_checkpointed_frame_
void MergeThrow(MaglevGraphBuilder *handler_builder, const MaglevCompilationUnit *handler_unit, const KnownNodeAspects &known_node_aspects, const VirtualObjectList virtual_objects)
#define DCHECK_NULL(val)
Definition: logging.h:490

References v8::base::args, v8::internal::DCHECK(), DCHECK_EQ, DCHECK_NULL, v8::internal::maglev::CatchBlockDetails::deopt_frame_distance, v8::internal::maglev::CatchBlockDetails::exception_handler_was_used, v8::internal::maglev::CatchBlockDetails::ref, and v8::base::internal::result.

+ Here is the call graph for this function:

◆ BuildElementsArray()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildElementsArray ( int  length)
private

Definition at line 12200 of file maglev-graph-builder.cc.

12200  {
12201  if (length == 0) {
12202  return GetRootConstant(RootIndex::kEmptyFixedArray);
12203  }
12204  VirtualObject* elements =
12205  CreateFixedArray(broker()->fixed_array_map(), length);
12206  auto hole = GetRootConstant(RootIndex::kTheHoleValue);
12207  for (int i = 0; i < length; i++) {
12208  elements->set(FixedArray::OffsetOfElementAt(i), hole);
12209  }
12210  return elements;
12211 }

References broker(), v8::internal::length, v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ BuildExtendPropertiesBackingStore()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildExtendPropertiesBackingStore ( compiler::MapRef  map,
ValueNode receiver,
ValueNode property_array 
)
private

Definition at line 5662 of file maglev-graph-builder.cc.

5663  {
5664  int length = map.NextFreePropertyIndex() - map.GetInObjectProperties();
5665  // Under normal circumstances, NextFreePropertyIndex() will always be larger
5666  // than GetInObjectProperties(). However, an attacker able to corrupt heap
5667  // memory can break this invariant, in which case we'll get confused here,
5668  // potentially causing a sandbox violation. This CHECK defends against that.
5669  SBXCHECK_GE(length, 0);
5670  return AddNewNode<ExtendPropertiesBackingStore>({property_array, receiver},
5671  length);
5672 }
#define SBXCHECK_GE(lhs, rhs)
Definition: check.h:58

References v8::internal::compiler::MapRef::GetInObjectProperties(), v8::internal::length, v8::internal::compiler::MapRef::NextFreePropertyIndex(), and SBXCHECK_GE.

Referenced by TryBuildStoreField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildFloat64BinaryOperationNodeForToNumber()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildFloat64BinaryOperationNodeForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2618 of file maglev-graph-builder.cc.

2620  {
2621  // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2622  // HoleyFloat64 nodes if folded.
2623  ValueNode* left = LoadRegisterHoleyFloat64ForToNumber(0, allowed_input_type,
2624  conversion_type);
2625  ValueNode* right = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2626  conversion_type);
2628  TryFoldFloat64BinaryOperationForToNumber<kOperation>(conversion_type,
2629  left, right),
2630  SetAccumulator);
2631  SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
2632  return ReduceResult::Done();
2633 }
NodeT * AddNewNode(size_t input_count, Function &&post_create_input_initializer, Args &&... args)
ValueNode * LoadRegisterHoleyFloat64ForToNumber(int operand_index, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
ValueNode * GetAccumulatorHoleyFloat64ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References AddNewNode(), v8::internal::maglev::ReduceResult::Done(), GetAccumulatorHoleyFloat64ForToNumber(), LoadRegisterHoleyFloat64ForToNumber(), PROCESS_AND_RETURN_IF_DONE, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildFloat64BinarySmiOperationNodeForToNumber()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildFloat64BinarySmiOperationNodeForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2571 of file maglev-graph-builder.cc.

2573  {
2574  // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2575  // HoleyFloat64 nodes if folded.
2576  ValueNode* left = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2577  conversion_type);
2578  double constant = static_cast<double>(iterator_.GetImmediateOperand(0));
2580  TryFoldFloat64BinaryOperationForToNumber<kOperation>(conversion_type,
2581  left, constant),
2582  SetAccumulator);
2583  ValueNode* right = GetFloat64Constant(constant);
2584  SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
2585  return ReduceResult::Done();
2586 }
Float64Constant * GetFloat64Constant(double constant)

References AddNewNode(), v8::internal::maglev::ReduceResult::Done(), GetAccumulatorHoleyFloat64ForToNumber(), GetFloat64Constant(), v8::internal::interpreter::BytecodeArrayIterator::GetImmediateOperand(), iterator_, PROCESS_AND_RETURN_IF_DONE, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildFloat64UnaryOperationNodeForToNumber()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildFloat64UnaryOperationNodeForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2589 of file maglev-graph-builder.cc.

2591  {
2592  // TODO(v8:7700): Do constant identity folding. Make sure to normalize
2593  // HoleyFloat64 nodes if folded.
2594  ValueNode* value = GetAccumulatorHoleyFloat64ForToNumber(allowed_input_type,
2595  conversion_type);
2597  TryFoldFloat64UnaryOperationForToNumber<kOperation>(conversion_type,
2598  value),
2599  SetAccumulator);
2600  switch (kOperation) {
2601  case Operation::kNegate:
2602  SetAccumulator(AddNewNode<Float64Negate>({value}));
2603  break;
2604  case Operation::kIncrement:
2605  SetAccumulator(AddNewNode<Float64Add>({value, GetFloat64Constant(1)}));
2606  break;
2607  case Operation::kDecrement:
2609  AddNewNode<Float64Subtract>({value, GetFloat64Constant(1)}));
2610  break;
2611  default:
2612  UNREACHABLE();
2613  }
2614  return ReduceResult::Done();
2615 }

References v8::internal::maglev::ReduceResult::Done(), GetAccumulatorHoleyFloat64ForToNumber(), GetFloat64Constant(), v8::kOperation, PROCESS_AND_RETURN_IF_DONE, SetAccumulator(), v8::internal::UNREACHABLE(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildGenericBinaryOperationNode()

template<Operation kOperation>
void v8::internal::maglev::MaglevGraphBuilder::BuildGenericBinaryOperationNode
private

Definition at line 2284 of file maglev-graph-builder.cc.

2284  {
2285  ValueNode* left = LoadRegister(0);
2286  ValueNode* right = GetAccumulator();
2287  FeedbackSlot slot_index = GetSlotOperand(1);
2288  SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2289  {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
2290 }

References AddNewNode(), feedback(), GetAccumulator(), GetSlotOperand(), LoadRegister(), and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildGenericBinarySmiOperationNode()

template<Operation kOperation>
void v8::internal::maglev::MaglevGraphBuilder::BuildGenericBinarySmiOperationNode
private

Definition at line 2293 of file maglev-graph-builder.cc.

2293  {
2294  ValueNode* left = GetAccumulator();
2295  int constant = iterator_.GetImmediateOperand(0);
2296  ValueNode* right = GetSmiConstant(constant);
2297  FeedbackSlot slot_index = GetSlotOperand(1);
2298  SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2299  {left, right}, compiler::FeedbackSource{feedback(), slot_index}));
2300 }

References AddNewNode(), feedback(), GetAccumulator(), v8::internal::interpreter::BytecodeArrayIterator::GetImmediateOperand(), GetSlotOperand(), GetSmiConstant(), iterator_, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildGenericCall()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildGenericCall ( ValueNode target,
Call::TargetType  target_type,
const CallArguments args 
)
private

Definition at line 10924 of file maglev-graph-builder.cc.

10926  {
10927  // TODO(victorgomes): We do not collect call feedback from optimized/inlined
10928  // calls. In order to be consistent, we don't pass the feedback_source to the
10929  // IR, so that we avoid collecting for generic calls as well. We might want to
10930  // revisit this in the future.
10931  switch (args.mode()) {
10933  return AddNewCallNode<Call>(args, args.receiver_mode(), target_type,
10934  GetTaggedValue(target),
10937  DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
10938  return AddNewCallNode<CallWithSpread>(args, GetTaggedValue(target),
10941  DCHECK_EQ(args.receiver_mode(), ConvertReceiverMode::kAny);
10942  // We don't use AddNewCallNode here, because the number of required
10943  // arguments is known statically.
10944  return AddNewNode<CallWithArrayLike>(
10945  {target, GetValueOrUndefined(args.receiver()), args[0],
10946  GetContext()});
10947  }
10948 }

References v8::base::args, DCHECK_EQ, and v8::internal::kAny.

◆ BuildGenericConstruct()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildGenericConstruct ( ValueNode target,
ValueNode new_target,
ValueNode context,
const CallArguments args,
const compiler::FeedbackSource feedback_source = compiler::FeedbackSource() 
)
private

Definition at line 12069 of file maglev-graph-builder.cc.

12072  {
12073  size_t input_count = args.count_with_receiver() + Construct::kFixedInputCount;
12075  return AddNewNode<Construct>(
12076  input_count,
12077  [&](Construct* construct) {
12078  int arg_index = 0;
12079  // Add undefined receiver.
12080  construct->set_arg(arg_index++,
12081  GetRootConstant(RootIndex::kUndefinedValue));
12082  for (size_t i = 0; i < args.count(); i++) {
12083  construct->set_arg(arg_index++, GetTaggedValue(args[i]));
12084  }
12085  },
12086  feedback_source, GetTaggedValue(target), GetTaggedValue(new_target),
12087  GetTaggedValue(context));
12088 }
static constexpr int kFixedInputCount
Definition: maglev-ir.h:10060
MaybeDirectHandle< JSLocale > Construct(Isolate *isolate, const icu::Locale &icu_locale)
Definition: js-locale.cc:425

References v8::base::args, DCHECK_EQ, v8::internal::compiler::turboshaft::detail::input_count(), v8::internal::kNullOrUndefined, and v8::internal::maglev::Construct::set_arg().

+ Here is the call graph for this function:

◆ BuildGenericUnaryOperationNode()

template<Operation kOperation>
void v8::internal::maglev::MaglevGraphBuilder::BuildGenericUnaryOperationNode
private

Definition at line 2276 of file maglev-graph-builder.cc.

2276  {
2277  FeedbackSlot slot_index = GetSlotOperand(0);
2278  ValueNode* value = GetAccumulator();
2279  SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
2280  {value}, compiler::FeedbackSource{feedback(), slot_index}));
2281 }

References AddNewNode(), feedback(), GetAccumulator(), GetSlotOperand(), SetAccumulator(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildGetKeyedProperty()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildGetKeyedProperty ( ValueNode object,
const compiler::FeedbackSource feedback_source,
const compiler::ProcessedFeedback processed_feedback 
)
private

Definition at line 7616 of file maglev-graph-builder.cc.

7618  {
7620  object, feedback_source, processed_feedback));
7621 
7622  auto build_generic_access = [this, object, &feedback_source]() {
7623  ValueNode* context = GetContext();
7624  ValueNode* key = GetAccumulator();
7625  return AddNewNode<GetKeyedGeneric>({context, object, key}, feedback_source);
7626  };
7627 
7628  switch (processed_feedback.kind()) {
7630  return EmitUnconditionalDeopt(
7631  DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
7632 
7634  // Get the accumulator without conversion. TryBuildElementAccess
7635  // will try to pick the best representation.
7637  MaybeReduceResult result = TryBuildElementAccess(
7638  object, index, processed_feedback.AsElementAccess(), feedback_source,
7639  build_generic_access);
7641  break;
7642  }
7643 
7645  ValueNode* key = GetAccumulator();
7646  compiler::NameRef name = processed_feedback.AsNamedAccess().name();
7648  key, name, DeoptimizeReason::kKeyedAccessChanged));
7649 
7650  MaybeReduceResult result = TryReuseKnownPropertyLoad(object, name);
7652 
7654  object, object, processed_feedback.AsNamedAccess(), feedback_source,
7655  compiler::AccessMode::kLoad, build_generic_access);
7657  break;
7658  }
7659 
7660  default:
7661  break;
7662  }
7663 
7664  // Create a generic load in the fallthrough.
7665  SetAccumulator(build_generic_access());
7666  return ReduceResult::Done();
7667 }
MaybeReduceResult TryBuildGetKeyedPropertyWithEnumeratedKey(ValueNode *object, const compiler::FeedbackSource &feedback_source, const compiler::ProcessedFeedback &processed_feedback)
MaybeReduceResult TryReuseKnownPropertyLoad(ValueNode *lookup_start_object, compiler::NameRef name)
MaybeReduceResult TryBuildNamedAccess(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NamedAccessFeedback const &feedback, compiler::FeedbackSource const &feedback_source, compiler::AccessMode access_mode, GenericAccessFunc &&build_generic_access)
ReduceResult BuildCheckInternalizedStringValueOrByReference(ValueNode *node, compiler::HeapObjectRef ref, DeoptimizeReason reason)
MaybeReduceResult TryBuildElementAccess(ValueNode *object, ValueNode *index, compiler::ElementAccessFeedback const &feedback, compiler::FeedbackSource const &feedback_source, GenericAccessFunc &&build_generic_access)
#define RETURN_IF_DONE(result)

References v8::internal::compiler::ProcessedFeedback::AsElementAccess(), v8::internal::compiler::ProcessedFeedback::AsNamedAccess(), v8::internal::index, v8::internal::compiler::ProcessedFeedback::kElementAccess, v8::internal::key, v8::internal::compiler::ProcessedFeedback::kind(), v8::internal::compiler::ProcessedFeedback::kInsufficient, v8::internal::compiler::kLoad, v8::internal::compiler::ProcessedFeedback::kNamedAccess, v8::internal::compiler::NamedAccessFeedback::name(), v8::internal::name, PROCESS_AND_RETURN_IF_DONE, v8::base::internal::result, RETURN_IF_ABORT, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ BuildHasInPrototypeChain()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildHasInPrototypeChain ( ValueNode object,
compiler::HeapObjectRef  prototype 
)
private

Definition at line 12662 of file maglev-graph-builder.cc.

12663  {
12665  return AddNewNode<HasInPrototypeChain>({object}, prototype);
12666 }
MaybeReduceResult TryBuildFastHasInPrototypeChain(ValueNode *object, compiler::HeapObjectRef prototype)
kInstanceDescriptorsOffset kTransitionsOrPrototypeInfoOffset prototype
Definition: map-inl.h:69

References v8::internal::prototype, and RETURN_IF_DONE.

◆ BuildInitializeStore()

void v8::internal::maglev::MaglevGraphBuilder::BuildInitializeStore ( InlinedAllocation alloc,
ValueNode value,
int  offset 
)
private

Definition at line 5087 of file maglev-graph-builder.cc.

5088  {
5089  const bool value_is_trusted = value->Is<TrustedConstant>();
5090  DCHECK(value->is_tagged());
5091  if (InlinedAllocation* inlined_value = value->TryCast<InlinedAllocation>()) {
5092  // Add to the escape set.
5093  auto escape_deps = graph()->allocations_escape_map().find(object);
5094  CHECK(escape_deps != graph()->allocations_escape_map().end());
5095  escape_deps->second.push_back(inlined_value);
5096  // Add to the elided set.
5097  auto& elided_map = graph()->allocations_elide_map();
5098  auto elided_deps = elided_map.try_emplace(inlined_value, zone()).first;
5099  elided_deps->second.push_back(object);
5100  inlined_value->AddNonEscapingUses();
5101  }
5102  if (value_is_trusted) {
5103  BuildStoreTrustedPointerField(object, value, offset,
5104  value->Cast<TrustedConstant>()->tag(),
5106  } else {
5107  BuildStoreTaggedField(object, value, offset,
5109  }
5110 }
ZoneMap< InlinedAllocation *, SmallAllocationVector > & allocations_escape_map()
Definition: maglev-graph.h:155
ZoneMap< InlinedAllocation *, SmallAllocationVector > & allocations_elide_map()
Definition: maglev-graph.h:160
void BuildStoreTrustedPointerField(ValueNode *object, ValueNode *value, int offset, IndirectPointerTag tag, StoreTaggedMode store_mode)
Node * BuildStoreTaggedField(ValueNode *object, ValueNode *value, int offset, StoreTaggedMode store_mode)
Node::Uses::const_iterator end(const Node::Uses &uses)
Definition: node.h:711

References v8::internal::maglev::Graph::allocations_elide_map(), v8::internal::maglev::Graph::allocations_escape_map(), BuildStoreTaggedField(), BuildStoreTrustedPointerField(), CHECK, v8::internal::DCHECK(), v8::internal::compiler::end(), graph(), v8::internal::maglev::kInitializing, v8::internal::maglev::TrustedConstant::tag(), v8::internal::value, and zone().

+ Here is the call graph for this function:

◆ BuildInlinedAllocation()

InlinedAllocation * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedAllocation ( VirtualObject object,
AllocationType  allocation 
)
private

Definition at line 13752 of file maglev-graph-builder.cc.

13753  {
13755  InlinedAllocation* allocation;
13756  switch (vobject->type()) {
13758  allocation =
13759  BuildInlinedAllocationForHeapNumber(vobject, allocation_type);
13760  break;
13762  allocation =
13763  BuildInlinedAllocationForDoubleFixedArray(vobject, allocation_type);
13764  break;
13766  allocation =
13767  BuildInlinedAllocationForConsString(vobject, allocation_type);
13768  break;
13769  case VirtualObject::kDefault: {
13770  SmallZoneVector<ValueNode*, 8> values(zone());
13771  vobject->ForEachInput([&](ValueNode*& node) {
13772  ValueNode* value_to_push;
13773  if (node->Is<VirtualObject>()) {
13774  VirtualObject* nested = node->Cast<VirtualObject>();
13775  node = BuildInlinedAllocation(nested, allocation_type);
13776  value_to_push = node;
13777  } else if (node->Is<Float64Constant>()) {
13778  value_to_push = BuildInlinedAllocationForHeapNumber(
13779  CreateHeapNumber(node->Cast<Float64Constant>()->value()),
13780  allocation_type);
13781  } else {
13782  value_to_push = GetTaggedValue(node);
13783  }
13784  values.push_back(value_to_push);
13785  });
13786  allocation =
13787  ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13788  AddNonEscapingUses(allocation, static_cast<int>(values.size()));
13789  if (vobject->has_static_map()) {
13790  AddNonEscapingUses(allocation, 1);
13791  BuildStoreMap(allocation, vobject->map(),
13793  }
13794  for (uint32_t i = 0; i < values.size(); i++) {
13795  BuildInitializeStore(allocation, values[i], (i + 1) * kTaggedSize);
13796  }
13797  if (is_loop_effect_tracking()) {
13798  loop_effects_->allocations.insert(allocation);
13799  }
13800  break;
13801  }
13802  }
13803  if (v8_flags.maglev_allocation_folding < 2) {
13805  }
13806  return allocation;
13807 }
void BuildInitializeStore(InlinedAllocation *alloc, ValueNode *value, int offset)
InlinedAllocation * ExtendOrReallocateCurrentAllocationBlock(AllocationType allocation_type, VirtualObject *value)
InlinedAllocation * BuildInlinedAllocationForConsString(VirtualObject *object, AllocationType allocation)
void BuildStoreMap(ValueNode *object, compiler::MapRef map, StoreMap::Kind kind)
InlinedAllocation * BuildInlinedAllocationForDoubleFixedArray(VirtualObject *object, AllocationType allocation)
InlinedAllocation * BuildInlinedAllocationForHeapNumber(VirtualObject *object, AllocationType allocation)
constexpr int kTaggedSize
Definition: globals.h:533

References v8::internal::maglev::VirtualObject::ForEachInput(), v8::internal::maglev::VirtualObject::has_static_map(), v8::internal::if(), v8::internal::maglev::NodeBase::Is(), v8::internal::kTaggedSize, v8::internal::maglev::VirtualObject::map(), v8::base::SmallVector< T, kSize, Allocator >::push_back(), v8::base::SmallVector< T, kSize, Allocator >::size(), v8::internal::maglev::VirtualObject::type(), and v8::internal::v8_flags.

Referenced by TryBuildNewConsString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildInlinedAllocationForConsString()

InlinedAllocation * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedAllocationForConsString ( VirtualObject object,
AllocationType  allocation 
)
private

Definition at line 13690 of file maglev-graph-builder.cc.

13691  {
13692  InlinedAllocation* allocation =
13693  ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13694  DCHECK_EQ(vobject->size(), sizeof(ConsString));
13695  DCHECK_EQ(vobject->cons_string().length->value_representation(),
13697  AddNonEscapingUses(allocation, 5);
13698  BuildInitializeStore(allocation, vobject->cons_string().map,
13700  AddNewNode<StoreInt32>(
13701  {allocation, GetInt32Constant(Name::kEmptyHashField)},
13702  static_cast<int>(offsetof(ConsString, raw_hash_field_)));
13703  AddNewNode<StoreInt32>({allocation, vobject->cons_string().length},
13704  static_cast<int>(offsetof(ConsString, length_)));
13705  BuildInitializeStore(allocation, vobject->cons_string().first(),
13706  offsetof(ConsString, first_));
13707  BuildInitializeStore(allocation, vobject->cons_string().second(),
13708  offsetof(ConsString, second_));
13709  if (is_loop_effect_tracking()) {
13710  loop_effects_->allocations.insert(allocation);
13711  }
13712  return allocation;
13713 }
static constexpr int kMapOffset
Definition: heap-object.h:498
static constexpr int kEmptyHashField
Definition: name.h:133

References v8::internal::maglev::VirtualObject::cons_string(), DCHECK_EQ, v8::internal::maglev::VirtualObject::VirtualConsString::first(), v8::internal::Name::kEmptyHashField, v8::internal::HeapObject::kMapOffset, v8::internal::length, v8::internal::maglev::VirtualObject::VirtualConsString::length, v8::internal::maglev::VirtualObject::VirtualConsString::map, v8::internal::maglev::VirtualObject::VirtualConsString::second(), v8::internal::maglev::VirtualObject::size(), and v8::internal::maglev::ValueNode::value_representation().

+ Here is the call graph for this function:

◆ BuildInlinedAllocationForDoubleFixedArray()

InlinedAllocation * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedAllocationForDoubleFixedArray ( VirtualObject object,
AllocationType  allocation 
)
private

Definition at line 13729 of file maglev-graph-builder.cc.

13730  {
13731  DCHECK(vobject->map().IsFixedDoubleArrayMap());
13732  InlinedAllocation* allocation =
13733  ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13734  int length = vobject->double_elements_length();
13735  AddNonEscapingUses(allocation, length + 2);
13736  BuildStoreMap(allocation, broker()->fixed_double_array_map(),
13738  AddNewNode<StoreTaggedFieldNoWriteBarrier>(
13739  {allocation, GetSmiConstant(length)},
13740  static_cast<int>(offsetof(FixedDoubleArray, length_)),
13742  for (int i = 0; i < length; ++i) {
13743  AddNewNode<StoreFloat64>(
13744  {allocation,
13746  vobject->double_elements().GetFromImmutableFixedDoubleArray(i))},
13748  }
13749  return allocation;
13750 }

References broker(), v8::internal::DCHECK(), v8::internal::maglev::VirtualObject::double_elements(), v8::internal::maglev::VirtualObject::double_elements_length(), v8::internal::compiler::FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::length, v8::internal::maglev::VirtualObject::map(), and v8::internal::PrimitiveArrayBase< FixedDoubleArray, FixedDoubleArrayShape >::OffsetOfElementAt().

+ Here is the call graph for this function:

◆ BuildInlinedAllocationForHeapNumber()

InlinedAllocation * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedAllocationForHeapNumber ( VirtualObject object,
AllocationType  allocation 
)
private

Definition at line 13715 of file maglev-graph-builder.cc.

13716  {
13717  DCHECK(vobject->map().IsHeapNumberMap());
13718  InlinedAllocation* allocation =
13719  ExtendOrReallocateCurrentAllocationBlock(allocation_type, vobject);
13720  AddNonEscapingUses(allocation, 2);
13721  BuildStoreMap(allocation, broker()->heap_number_map(),
13723  AddNewNode<StoreFloat64>({allocation, GetFloat64Constant(vobject->number())},
13724  static_cast<int>(offsetof(HeapNumber, value_)));
13725  return allocation;
13726 }

References broker(), v8::internal::DCHECK(), v8::internal::maglev::VirtualObject::map(), and v8::internal::maglev::VirtualObject::number().

+ Here is the call graph for this function:

◆ BuildInlinedArgumentsElements()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedArgumentsElements ( int  start_index,
int  length 
)
private

Definition at line 13809 of file maglev-graph-builder.cc.

13810  {
13811  DCHECK(is_inline());
13812  if (length == 0) {
13813  return GetRootConstant(RootIndex::kEmptyFixedArray);
13814  }
13815  VirtualObject* elements =
13816  CreateFixedArray(broker()->fixed_array_map(), length);
13817  for (int i = 0; i < length; i++) {
13818  elements->set(FixedArray::OffsetOfElementAt(i),
13819  caller_details_->arguments[i + start_index + 1]);
13820  }
13821  return elements;
13822 }

References broker(), v8::internal::DCHECK(), v8::internal::length, v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ BuildInlinedUnmappedArgumentsElements()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildInlinedUnmappedArgumentsElements ( int  mapped_count)
private

Definition at line 13824 of file maglev-graph-builder.cc.

13825  {
13827  if (length == 0) {
13828  return GetRootConstant(RootIndex::kEmptyFixedArray);
13829  }
13830  VirtualObject* unmapped_elements =
13831  CreateFixedArray(broker()->fixed_array_map(), length);
13832  int i = 0;
13833  for (; i < mapped_count; i++) {
13834  unmapped_elements->set(FixedArray::OffsetOfElementAt(i),
13835  GetRootConstant(RootIndex::kTheHoleValue));
13836  }
13837  for (; i < length; i++) {
13838  unmapped_elements->set(FixedArray::OffsetOfElementAt(i),
13839  caller_details_->arguments[i + 1]);
13840  }
13841  return unmapped_elements;
13842 }

References broker(), v8::internal::length, v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ BuildInlineFunction()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildInlineFunction ( SourcePosition  call_site_position,
ValueNode context,
ValueNode function,
ValueNode new_target 
)

Definition at line 8324 of file maglev-graph-builder.cc.

8326  {
8327  DCHECK(is_inline());
8328  DCHECK_GT(caller_details_->arguments.size(), 0);
8329 
8330  compiler::SharedFunctionInfoRef shared =
8332  compiler::BytecodeArrayRef bytecode = compilation_unit_->bytecode();
8333  compiler::FeedbackVectorRef feedback = compilation_unit_->feedback();
8334 
8335  if (v8_flags.maglev_print_inlined &&
8337  (v8_flags.print_maglev_code || v8_flags.print_maglev_graph ||
8338  v8_flags.print_maglev_graphs ||
8339  v8_flags.trace_maglev_inlining_verbose)) {
8340  std::cout << "== Inlining " << Brief(*shared.object()) << std::endl;
8342  if (v8_flags.maglev_print_feedback) {
8343  i::Print(*feedback.object(), std::cout);
8344  }
8345  } else if (v8_flags.trace_maglev_graph_building ||
8346  v8_flags.trace_maglev_inlining) {
8347  std::cout << "== Inlining " << shared.object() << std::endl;
8348  }
8349 
8350  graph()->inlined_functions().push_back(
8351  OptimizedCompilationInfo::InlinedFunctionHolder(
8352  shared.object(), bytecode.object(), call_site_position));
8353  if (feedback.object()->invocation_count_before_stable(kRelaxedLoad) >
8354  v8_flags.invocation_count_for_early_optimization) {
8356  }
8357  inlining_id_ = static_cast<int>(graph()->inlined_functions().size() - 1);
8358 
8360  current_source_position_ = SourcePosition(
8362 
8363  // Manually create the prologue of the inner function graph, so that we
8364  // can manually set up the arguments.
8366 
8367  // Set receiver.
8369 
8370  // The inlined function could call a builtin that iterates the frame, the
8371  // receiver needs to have been materialized.
8372  // TODO(victorgomes): Can we relax this requirement? Maybe we can allocate the
8373  // object lazily? This is also only required if the inlined function is not a
 8374  // leaf (i.e. it calls other functions).
8376 
8377  // Set remaining arguments.
8378  RootConstant* undefined_constant =
8379  GetRootConstant(RootIndex::kUndefinedValue);
8380  int args_count = static_cast<int>(caller_details_->arguments.size()) - 1;
8381  int formal_parameter_count = compilation_unit_->parameter_count() - 1;
8382  for (int i = 0; i < formal_parameter_count; i++) {
8383  ValueNode* arg_value =
8384  i < args_count ? caller_details_->arguments[i + 1] : undefined_constant;
8385  SetArgument(i + 1, arg_value);
8386  }
8387 
8388  inlined_new_target_ = new_target;
8389 
8390  BuildRegisterFrameInitialization(context, function, new_target);
8391  BuildMergeStates();
8392  EndPrologue();
8393  in_prologue_ = false;
8394 
8395  // Build the inlined function body.
8396  BuildBody();
8397 
8398  // All returns in the inlined body jump to a merge point one past the bytecode
8399  // length (i.e. at offset bytecode.length()). If there isn't one already,
8400  // create a block at this fake offset and have it jump out of the inlined
8401  // function, into a new block that we create which resumes execution of the
8402  // outer function.
8403  if (!current_block_) {
8404  // If we don't have a merge state at the inline_exit_offset, then there is
8405  // no control flow that reaches the end of the inlined function, either
 8406  // because of infinite loops or deopts.
8407  if (merge_states_[inline_exit_offset()] == nullptr) {
8408  if (v8_flags.trace_maglev_graph_building) {
8409  std::cout << "== Finished inlining (abort) " << shared.object()
8410  << std::endl;
8411  }
8412  return ReduceResult::DoneWithAbort();
8413  }
8414 
8415  ProcessMergePoint(inline_exit_offset(), /*preserve_kna*/ false);
8416  StartNewBlock(inline_exit_offset(), /*predecessor*/ nullptr);
8417  }
8418 
8419  if (v8_flags.trace_maglev_graph_building) {
8420  std::cout << "== Finished inlining " << shared.object() << std::endl;
8421  }
8422 
8423  // Pull the returned accumulator value out of the inlined function's final
8424  // merged return state.
8426 }
V8_EXPORT_PRIVATE void Disassemble(std::ostream &os)
IndirectHandle< BytecodeArray > object() const
IndirectHandle< FeedbackVector > object() const
ZoneVector< OptimizedCompilationInfo::InlinedFunctionHolder > & inlined_functions()
Definition: maglev-graph.h:167
compiler::BytecodeArrayRef bytecode() const
compiler::FeedbackVectorRef feedback() const
void ProcessMergePoint(int offset, bool preserve_known_node_aspects)
void StartNewBlock(int offset, BasicBlock *predecessor)
void Print(Tagged< Object > obj)
Definition: objects.h:772
static constexpr RelaxedLoadTag kRelaxedLoad
Definition: globals.h:2943

References v8::internal::DCHECK(), DCHECK_GT, DCHECK_NE, DCHECK_NOT_NULL, v8::internal::BytecodeArray::Disassemble(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::ForceEscapeIfAllocation(), graph(), v8::internal::SourcePosition::kNotInlined, v8::kRelaxedLoad, v8::internal::compiler::FeedbackVectorRef::object(), v8::internal::compiler::BytecodeArrayRef::object(), v8::internal::compiler::SharedFunctionInfoRef::object(), v8::internal::Print(), and v8::internal::v8_flags.

Referenced by v8::internal::maglev::MaglevInliner::BuildInlineFunction().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildInt32BinaryOperationNode()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildInt32BinaryOperationNode
private

Definition at line 2424 of file maglev-graph-builder.cc.

2424  {
2425  // Use BuildTruncatingInt32BinaryOperationNodeForToNumber with Smi input hint
2426  // for truncating operations.
2427  static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2428  ValueNode* left = LoadRegister(0);
2429  ValueNode* right = GetAccumulator();
2431  TryFoldInt32BinaryOperation<kOperation>(left, right), SetAccumulator);
2432  using OpNodeT = Int32NodeFor<kOperation>;
2433  SetAccumulator(AddNewNode<OpNodeT>({left, right}));
2434  return ReduceResult::Done();
2435 }

References v8::internal::maglev::ReduceResult::Done(), GetAccumulator(), LoadRegister(), PROCESS_AND_RETURN_IF_DONE, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildInt32BinarySmiOperationNode()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildInt32BinarySmiOperationNode
private

Definition at line 2464 of file maglev-graph-builder.cc.

2464  {
2465  // Truncating Int32 nodes treat their input as a signed int32 regardless
2466  // of whether it's really signed or not, so we allow Uint32 by loading a
2467  // TruncatedInt32 value.
2468  static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2469  ValueNode* left = GetAccumulator();
2470  int32_t constant = iterator_.GetImmediateOperand(0);
2471  if (std::optional<int>(constant) == Int32Identity<kOperation>()) {
2472  // Deopt if {left} is not an Int32.
2473  EnsureInt32(left);
2474  // If the constant is the unit of the operation, it already has the right
2475  // value, so just return.
2476  return ReduceResult::Done();
2477  }
2479  TryFoldInt32BinaryOperation<kOperation>(left, constant), SetAccumulator);
2480  ValueNode* right = GetInt32Constant(constant);
2481  using OpNodeT = Int32NodeFor<kOperation>;
2482  SetAccumulator(AddNewNode<OpNodeT>({left, right}));
2483  return ReduceResult::Done();
2484 }
void EnsureInt32(ValueNode *value, bool can_be_heap_number=false)
int int32_t
Definition: unicode.cc:42

References v8::internal::maglev::ReduceResult::Done(), EnsureInt32(), GetAccumulator(), v8::internal::interpreter::BytecodeArrayIterator::GetImmediateOperand(), GetInt32Constant(), iterator_, PROCESS_AND_RETURN_IF_DONE, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildInt32UnaryOperationNode()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildInt32UnaryOperationNode
private

Definition at line 2334 of file maglev-graph-builder.cc.

2334  {
2335  // Use BuildTruncatingInt32BitwiseNotForToNumber with Smi input hint
2336  // for truncating operations.
2337  static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
2338  ValueNode* value = GetAccumulator();
2339  PROCESS_AND_RETURN_IF_DONE(TryFoldInt32UnaryOperation<kOperation>(value),
2340  SetAccumulator);
2341  using OpNodeT = Int32NodeFor<kOperation>;
2342  SetAccumulator(AddNewNode<OpNodeT>({value}));
2343  return ReduceResult::Done();
2344 }

References v8::internal::maglev::ReduceResult::Done(), GetAccumulator(), PROCESS_AND_RETURN_IF_DONE, SetAccumulator(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildJSArrayBuiltinMapSwitchOnElementsKind()

template<typename MapKindsT , typename IndexToElementsKindFunc , typename BuildKindSpecificFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildJSArrayBuiltinMapSwitchOnElementsKind ( ValueNode receiver,
const MapKindsT &  map_kinds,
MaglevSubGraphBuilder sub_graph,
std::optional< MaglevSubGraphBuilder::Label > &  do_return,
int  unique_kind_count,
IndexToElementsKindFunc &&  index_to_elements_kind,
BuildKindSpecificFunc &&  build_kind_specific 
)
private

Definition at line 9948 of file maglev-graph-builder.cc.

9953  {
9954  // TODO(pthier): Support map packing.
9956  ValueNode* receiver_map =
9958  int emitted_kind_checks = 0;
9959  bool any_successful = false;
9960  for (size_t kind_index = 0; kind_index < map_kinds.size(); kind_index++) {
9961  const auto& maps = map_kinds[kind_index];
9962  // Skip kinds we haven't observed.
9963  if (maps.empty()) continue;
9964  ElementsKind kind = index_to_elements_kind(kind_index);
9965  // Create branches for all but the last elements kind. We don't need
9966  // to check the maps of the last kind, as all possible maps have already
9967  // been checked when the property (builtin name) was loaded.
9968  if (++emitted_kind_checks < unique_kind_count) {
9969  MaglevSubGraphBuilder::Label check_next_map(&sub_graph, 1);
9970  std::optional<MaglevSubGraphBuilder::Label> do_push;
9971  if (maps.size() > 1) {
9972  do_push.emplace(&sub_graph, static_cast<int>(maps.size()));
9973  for (size_t map_index = 1; map_index < maps.size(); map_index++) {
9974  sub_graph.GotoIfTrue<BranchIfReferenceEqual>(
9975  &*do_push, {receiver_map, GetConstant(maps[map_index])});
9976  }
9977  }
9978  sub_graph.GotoIfFalse<BranchIfReferenceEqual>(
9979  &check_next_map, {receiver_map, GetConstant(maps[0])});
9980  if (do_push.has_value()) {
9981  sub_graph.Goto(&*do_push);
9982  sub_graph.Bind(&*do_push);
9983  }
9984  if (!build_kind_specific(kind).IsDoneWithAbort()) {
9985  any_successful = true;
9986  }
9987  DCHECK(do_return.has_value());
9988  sub_graph.GotoOrTrim(&*do_return);
9989  sub_graph.Bind(&check_next_map);
9990  } else {
9991  if (!build_kind_specific(kind).IsDoneWithAbort()) {
9992  any_successful = true;
9993  }
9994  if (do_return.has_value()) {
9995  sub_graph.GotoOrTrim(&*do_return);
9996  }
9997  }
9998  }
9999  DCHECK_IMPLIES(!any_successful, !current_block_);
10000  return any_successful ? ReduceResult::Done() : ReduceResult::DoneWithAbort();
10001 }
ValueNode * BuildLoadTaggedField(ValueNode *object, uint32_t offset, Args &&... args)

References v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), v8::internal::DCHECK(), DCHECK_IMPLIES, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfFalse(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfTrue(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoOrTrim(), v8::internal::HeapObject::kMapOffset, and V8_MAP_PACKING_BOOL.

+ Here is the call graph for this function:

◆ BuildLoadConstantTypedArrayElement()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadConstantTypedArrayElement ( compiler::JSTypedArrayRef  typed_array,
ValueNode index,
ElementsKind  elements_kind 
)
private

Definition at line 6370 of file maglev-graph-builder.cc.

6372  {
6373 #define BUILD_AND_RETURN_LOAD_CONSTANT_TYPED_ARRAY(Type) \
6374  return AddNewNode<Load##Type##ConstantTypedArrayElement>( \
6375  {index}, typed_array, elements_kind);
6376 
6377  switch (elements_kind) {
6378  case INT8_ELEMENTS:
6379  case INT16_ELEMENTS:
6380  case INT32_ELEMENTS:
6383  case UINT8_ELEMENTS:
6384  case UINT16_ELEMENTS:
6385  case UINT32_ELEMENTS:
6387  case FLOAT32_ELEMENTS:
6388  case FLOAT64_ELEMENTS:
6390  default:
6391  UNREACHABLE();
6392  }
 6393 #undef BUILD_AND_RETURN_LOAD_CONSTANT_TYPED_ARRAY
6394 }
#define BUILD_AND_RETURN_LOAD_CONSTANT_TYPED_ARRAY(Type)
UINT32_ELEMENTS INT32_ELEMENTS FLOAT32_ELEMENTS
Definition: maglev-ir.h:8756
UINT32_ELEMENTS INT8_ELEMENTS
Definition: maglev-ir.h:8747
UINT32_ELEMENTS INT16_ELEMENTS
Definition: maglev-ir.h:8747

References BUILD_AND_RETURN_LOAD_CONSTANT_TYPED_ARRAY, v8::internal::maglev::FLOAT32_ELEMENTS, v8::internal::maglev::INT16_ELEMENTS, v8::internal::maglev::INT8_ELEMENTS, v8::internal::maglev::UINT16_ELEMENTS, v8::internal::maglev::UINT8_CLAMPED_ELEMENTS, v8::internal::maglev::UINT8_ELEMENTS, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ BuildLoadContextSlot()

void v8::internal::maglev::MaglevGraphBuilder::BuildLoadContextSlot ( ValueNode context,
size_t  depth,
int  slot_index,
ContextSlotMutability  slot_mutability,
ContextMode  context_mode 
)
private

Definition at line 3781 of file maglev-graph-builder.cc.

3783  {
3784  context = GetContextAtDepth(context, depth);
3787  slot_mutability)) {
3788  return; // Our work here is done.
3789  }
3790 
3791  // Always load the slot here as if it were mutable. Immutable slots have a
3792  // narrow range of mutability if the context escapes before the slot is
3793  // initialized, so we can't safely assume that the load can be cached in case
3794  // it's a load before initialization (e.g. var a = a + 42).
3796  LoadAndCacheContextSlot(context, slot_index, kMutable, context_mode));
3797 }
ValueNode * LoadAndCacheContextSlot(ValueNode *context, int offset, ContextSlotMutability slot_mutability, ContextMode context_mode)
ValueNode * GetContextAtDepth(ValueNode *context, size_t depth)
bool TrySpecializeLoadContextSlotToFunctionContext(ValueNode *context, int slot_index, ContextSlotMutability slot_mutability)

References compilation_unit_, current_interpreter_frame_, GetContextAtDepth(), v8::internal::maglev::MaglevCompilationUnit::info(), kMutable, LoadAndCacheContextSlot(), v8::internal::maglev::InterpreterFrameState::set_accumulator(), v8::internal::maglev::MaglevCompilationInfo::specialize_to_function_context(), and TrySpecializeLoadContextSlotToFunctionContext().

+ Here is the call graph for this function:

◆ BuildLoadElements()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadElements ( ValueNode object)
private

Definition at line 6285 of file maglev-graph-builder.cc.

6285  {
6286  MaybeReduceResult known_elements =
6287  TryFindLoadedProperty(known_node_aspects().loaded_properties, object,
6289  if (known_elements.IsDone()) {
6290  DCHECK(known_elements.IsDoneWithValue());
6291  if (v8_flags.trace_maglev_graph_building) {
6292  std::cout << " * Reusing non-constant [Elements] "
6293  << PrintNodeLabel(graph_labeller(), known_elements.value())
6294  << ": " << PrintNode(graph_labeller(), known_elements.value())
6295  << std::endl;
6296  }
6297  return known_elements.value();
6298  }
6299 
6300  DCHECK_EQ(JSObject::kElementsOffset, JSArray::kElementsOffset);
6301  ValueNode* elements = BuildLoadTaggedField(object, JSObject::kElementsOffset);
6302  RecordKnownProperty(object,
6304  elements, false, compiler::AccessMode::kLoad);
6305  return elements;
6306 }
void RecordKnownProperty(ValueNode *lookup_start_object, KnownNodeAspects::LoadedPropertyMapKey key, ValueNode *value, bool is_const, compiler::AccessMode access_mode)
MaybeReduceResult TryFindLoadedProperty(const KnownNodeAspects::LoadedPropertyMap &loaded_properties, ValueNode *lookup_start_object, KnownNodeAspects::LoadedPropertyMapKey name)

References v8::internal::DCHECK(), DCHECK_EQ, v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), v8::internal::compiler::kLoad, v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::TryFindLoadedProperty(), v8::internal::v8_flags, and v8::internal::maglev::MaybeReduceResult::value().

+ Here is the call graph for this function:

◆ BuildLoadField()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadField ( compiler::PropertyAccessInfo const &  access_info,
ValueNode lookup_start_object,
compiler::NameRef  name 
)
private

Definition at line 5533 of file maglev-graph-builder.cc.

5535  {
5536  compiler::OptionalJSObjectRef constant_holder =
5537  TryGetConstantDataFieldHolder(access_info, lookup_start_object);
5538  if (constant_holder) {
5539  if (access_info.field_representation().IsDouble()) {
5540  std::optional<Float64> constant =
5541  TryFoldLoadConstantDoubleField(constant_holder.value(), access_info);
5542  if (constant.has_value()) {
5543  return GetFloat64Constant(constant.value());
5544  }
5545  } else {
5546  compiler::OptionalObjectRef constant =
5547  TryFoldLoadConstantDataField(constant_holder.value(), access_info);
5548  if (constant.has_value()) {
5549  return GetConstant(constant.value());
5550  }
5551  }
5552  }
5553  // Resolve property holder.
5554  ValueNode* load_source;
5555  if (access_info.holder().has_value()) {
5556  load_source = GetConstant(access_info.holder().value());
5557  } else {
5558  load_source = lookup_start_object;
5559  }
5560 
5561  FieldIndex field_index = access_info.field_index();
5562  if (!field_index.is_inobject()) {
5563  // The field is in the property array, first load it from there.
5564  load_source =
5565  BuildLoadTaggedField(load_source, JSReceiver::kPropertiesOrHashOffset);
5566  }
5567 
5568  // Do the load.
5569  if (field_index.is_double()) {
5570  return AddNewNode<LoadDoubleField>({load_source}, field_index.offset());
5571  }
5572  ValueNode* value = BuildLoadTaggedField<LoadTaggedFieldForProperty>(
5573  load_source, field_index.offset(), name);
5574  // Insert stable field information if present.
5575  if (access_info.field_representation().IsSmi()) {
5576  NodeInfo* known_info = GetOrCreateInfoFor(value);
5577  known_info->IntersectType(NodeType::kSmi);
5578  } else if (access_info.field_representation().IsHeapObject()) {
5579  NodeInfo* known_info = GetOrCreateInfoFor(value);
5580  if (access_info.field_map().has_value() &&
5581  access_info.field_map().value().is_stable()) {
5582  DCHECK(access_info.field_map().value().IsJSReceiverMap());
5583  auto map = access_info.field_map().value();
5584  known_info->SetPossibleMaps(PossibleMaps{map}, false,
5585  StaticTypeForMap(map, broker()), broker());
5587  } else {
5588  known_info->IntersectType(NodeType::kAnyHeapObject);
5589  }
5590  }
5591  return value;
5592 }
std::optional< Float64 > TryFoldLoadConstantDoubleField(compiler::JSObjectRef holder, compiler::PropertyAccessInfo const &access_info)
compiler::OptionalJSObjectRef TryGetConstantDataFieldHolder(compiler::PropertyAccessInfo const &access_info, ValueNode *lookup_start_object)
compiler::OptionalObjectRef TryFoldLoadConstantDataField(compiler::JSObjectRef holder, compiler::PropertyAccessInfo const &access_info)
NodeType StaticTypeForMap(compiler::MapRef map, compiler::JSHeapBroker *broker)
Definition: maglev-ir.h:842
compiler::ZoneRefSet< Map > PossibleMaps

References broker(), BuildLoadTaggedField(), v8::internal::DCHECK(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), v8::internal::compiler::PropertyAccessInfo::field_index(), v8::internal::compiler::PropertyAccessInfo::field_map(), v8::internal::compiler::PropertyAccessInfo::field_representation(), GetConstant(), GetFloat64Constant(), GetOrCreateInfoFor(), v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::maglev::NodeInfo::IntersectType(), v8::internal::FieldIndex::is_double(), v8::internal::FieldIndex::is_inobject(), v8::internal::Representation::IsDouble(), v8::internal::Representation::IsHeapObject(), v8::internal::Representation::IsSmi(), v8::internal::compiler::kSmi, v8::internal::name, v8::internal::FieldIndex::offset(), v8::internal::maglev::NodeInfo::SetPossibleMaps(), v8::internal::maglev::StaticTypeForMap(), TryFoldLoadConstantDataField(), TryFoldLoadConstantDoubleField(), TryGetConstantDataFieldHolder(), and v8::internal::value.

Referenced by TryBuildPropertyLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLoadFixedArrayElement() [1/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadFixedArrayElement ( ValueNode elements,
int  index 
)
private

Definition at line 5294 of file maglev-graph-builder.cc.

5295  {
5296  compiler::OptionalHeapObjectRef maybe_constant;
5297  if ((maybe_constant = TryGetConstant(elements)) &&
5298  maybe_constant.value().IsFixedArray()) {
5299  compiler::FixedArrayRef fixed_array_ref =
5300  maybe_constant.value().AsFixedArray();
5301  if (index >= 0 && static_cast<uint32_t>(index) < fixed_array_ref.length()) {
5302  compiler::OptionalObjectRef maybe_value =
5303  fixed_array_ref.TryGet(broker(), index);
5304  if (maybe_value) return GetConstant(*maybe_value);
5305  } else {
5306  return GetRootConstant(RootIndex::kTheHoleValue);
5307  }
5308  }
5310  VirtualObject* vobject =
5311  GetObjectFromAllocation(elements->Cast<InlinedAllocation>());
5312  CHECK_EQ(vobject->type(), VirtualObject::kDefault);
5313  DCHECK(vobject->map().IsFixedArrayMap());
5314  ValueNode* length_node = vobject->get(offsetof(FixedArray, length_));
5315  if (auto length = TryGetInt32Constant(length_node)) {
5316  if (index >= 0 && index < length.value()) {
5317  return vobject->get(FixedArray::OffsetOfElementAt(index));
5318  } else {
5319  return GetRootConstant(RootIndex::kTheHoleValue);
5320  }
5321  }
5322  }
5323  if (index < 0 || index >= FixedArray::kMaxLength) {
5324  return GetRootConstant(RootIndex::kTheHoleValue);
5325  }
5326  return AddNewNode<LoadTaggedField>({elements},
5328 }
static constexpr int kMaxLength
Definition: fixed-array.h:272
bool CanTrackObjectChanges(ValueNode *object, TrackObjectMode mode)
VirtualObject * GetObjectFromAllocation(InlinedAllocation *allocation)

References broker(), CanTrackObjectChanges(), v8::internal::maglev::NodeBase::Cast(), CHECK_EQ, v8::internal::DCHECK(), v8::internal::maglev::VirtualObject::get(), GetConstant(), GetObjectFromAllocation(), GetRootConstant(), v8::internal::index, v8::internal::maglev::VirtualObject::kDefault, kLoad, v8::internal::FixedArray::kMaxLength, v8::internal::length, v8::internal::compiler::FixedArrayBaseRef::length(), v8::internal::maglev::VirtualObject::map(), v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), v8::internal::compiler::FixedArrayRef::TryGet(), TryGetConstant(), TryGetInt32Constant(), and v8::internal::maglev::VirtualObject::type().

Referenced by BuildLoadFixedArrayElement().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLoadFixedArrayElement() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadFixedArrayElement ( ValueNode elements,
ValueNode index 
)
private

Definition at line 5330 of file maglev-graph-builder.cc.

5331  {
5332  if (auto constant = TryGetInt32Constant(index)) {
5333  return BuildLoadFixedArrayElement(elements, constant.value());
5334  }
5335  return AddNewNode<LoadFixedArrayElement>({elements, index});
5336 }
ValueNode * BuildLoadFixedArrayElement(ValueNode *elements, int index)

References BuildLoadFixedArrayElement(), v8::internal::index, and TryGetInt32Constant().

+ Here is the call graph for this function:

◆ BuildLoadFixedArrayLength()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadFixedArrayLength ( ValueNode fixed_array)
private

Definition at line 5594 of file maglev-graph-builder.cc.

5595  {
5596  ValueNode* length =
5597  BuildLoadTaggedField(fixed_array, offsetof(FixedArray, length_));
5599  return length;
5600 }

References BuildLoadTaggedField(), EnsureType(), v8::internal::compiler::kSmi, and v8::internal::length.

+ Here is the call graph for this function:

◆ BuildLoadFixedDoubleArrayElement() [1/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadFixedDoubleArrayElement ( ValueNode elements,
int  index 
)
private

Definition at line 5352 of file maglev-graph-builder.cc.

5353  {
5355  VirtualObject* vobject =
5356  GetObjectFromAllocation(elements->Cast<InlinedAllocation>());
5357  compiler::FixedDoubleArrayRef elements_array = vobject->double_elements();
5358  if (index >= 0 && static_cast<uint32_t>(index) < elements_array.length()) {
5359  Float64 value = elements_array.GetFromImmutableFixedDoubleArray(index);
5360  return GetFloat64Constant(value.get_scalar());
5361  } else {
5362  return GetRootConstant(RootIndex::kTheHoleValue);
5363  }
5364  }
5365  if (index < 0 || index >= FixedArray::kMaxLength) {
5366  return GetRootConstant(RootIndex::kTheHoleValue);
5367  }
5368  return AddNewNode<LoadFixedDoubleArrayElement>(
5369  {elements, GetInt32Constant(index)});
5370 }

References CanTrackObjectChanges(), v8::internal::maglev::NodeBase::Cast(), v8::internal::maglev::VirtualObject::double_elements(), GetFloat64Constant(), v8::internal::compiler::FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(), GetInt32Constant(), GetObjectFromAllocation(), GetRootConstant(), v8::internal::index, kLoad, v8::internal::FixedArray::kMaxLength, v8::internal::compiler::FixedArrayBaseRef::length(), and v8::internal::value.

Referenced by BuildLoadFixedDoubleArrayElement().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLoadFixedDoubleArrayElement() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadFixedDoubleArrayElement ( ValueNode elements,
ValueNode index 
)
private

Definition at line 5372 of file maglev-graph-builder.cc.

5373  {
5374  if (auto constant = TryGetInt32Constant(index)) {
5375  return BuildLoadFixedDoubleArrayElement(elements, constant.value());
5376  }
5377  return AddNewNode<LoadFixedDoubleArrayElement>({elements, index});
5378 }
ValueNode * BuildLoadFixedDoubleArrayElement(ValueNode *elements, int index)

References BuildLoadFixedDoubleArrayElement(), v8::internal::index, and TryGetInt32Constant().

+ Here is the call graph for this function:

◆ BuildLoadGlobal()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildLoadGlobal ( compiler::NameRef  name,
compiler::FeedbackSource feedback_source,
TypeofMode  typeof_mode 
)
private

Definition at line 7782 of file maglev-graph-builder.cc.

7784  {
7785  const compiler::ProcessedFeedback& access_feedback =
7786  broker()->GetFeedbackForGlobalAccess(feedback_source);
7787 
7788  if (access_feedback.IsInsufficient()) {
7789  return EmitUnconditionalDeopt(
7790  DeoptimizeReason::kInsufficientTypeFeedbackForGenericGlobalAccess);
7791  }
7792 
7793  const compiler::GlobalAccessFeedback& global_access_feedback =
7794  access_feedback.AsGlobalAccess();
7795  PROCESS_AND_RETURN_IF_DONE(TryBuildGlobalLoad(global_access_feedback),
7796  SetAccumulator);
7797 
7798  ValueNode* context = GetContext();
7800  AddNewNode<LoadGlobal>({context}, name, feedback_source, typeof_mode));
7801  return ReduceResult::Done();
7802 }
ProcessedFeedback const & GetFeedbackForGlobalAccess(FeedbackSource const &source)
MaybeReduceResult TryBuildGlobalLoad(const compiler::GlobalAccessFeedback &global_access_feedback)

References v8::internal::compiler::ProcessedFeedback::AsGlobalAccess(), broker(), v8::internal::compiler::ProcessedFeedback::IsInsufficient(), v8::internal::name, and PROCESS_AND_RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ BuildLoadHoleyFixedDoubleArrayElement()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadHoleyFixedDoubleArrayElement ( ValueNode elements,
ValueNode index,
bool  convert_hole 
)
private

Definition at line 5387 of file maglev-graph-builder.cc.

5388  {
5389  if (convert_hole) {
5390  return AddNewNode<LoadHoleyFixedDoubleArrayElement>({elements, index});
5391  } else {
5392  return AddNewNode<LoadHoleyFixedDoubleArrayElementCheckedNotHole>(
5393  {elements, index});
5394  }
5395 }

References v8::internal::index.

◆ BuildLoadJSArrayLength()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadJSArrayLength ( ValueNode js_array,
NodeType  length_type = NodeType::kSmi 
)
private

Definition at line 5602 of file maglev-graph-builder.cc.

5603  {
5604  // TODO(leszeks): JSArray.length is known to be non-constant, don't bother
5605  // searching the constant values.
5606  MaybeReduceResult known_length =
5607  TryReuseKnownPropertyLoad(js_array, broker()->length_string());
5608  if (known_length.IsDone()) {
5609  DCHECK(known_length.IsDoneWithValue());
5610  return known_length.value();
5611  }
5612 
5613  ValueNode* length = BuildLoadTaggedField<LoadTaggedFieldForProperty>(
5614  js_array, JSArray::kLengthOffset, broker()->length_string());
5615  GetOrCreateInfoFor(length)->IntersectType(length_type);
5616  RecordKnownProperty(js_array, broker()->length_string(), length, false,
5618  return length;
5619 }

References broker(), v8::internal::DCHECK(), GetOrCreateInfoFor(), v8::internal::maglev::NodeInfo::IntersectType(), v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), v8::internal::compiler::kLoad, v8::internal::length, RecordKnownProperty(), TryReuseKnownPropertyLoad(), and v8::internal::maglev::MaybeReduceResult::value().

+ Here is the call graph for this function:

◆ BuildLoadJSFunctionContext()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadJSFunctionContext ( ValueNode closure)
private

Definition at line 5635 of file maglev-graph-builder.cc.

5635  {
5636  DCHECK(NodeTypeIs(GetType(closure), NodeType::kJSFunction));
5637  if (auto constant = closure->TryGetConstant(broker())) {
5638  if (constant->IsJSFunction()) {
5639  return GetConstant(constant->AsJSFunction().context(broker()));
5640  }
5641  }
5642  ValueNode* context =
5643  BuildLoadTaggedField(closure, JSFunction::kContextOffset);
5644  EnsureType(context, NodeType::kContext);
5645  return context;
5646 }

References broker(), BuildLoadTaggedField(), v8::internal::DCHECK(), EnsureType(), GetConstant(), GetType(), v8::internal::maglev::NodeTypeIs(), and v8::internal::maglev::ValueNode::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildLoadJSFunctionFeedbackCell()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadJSFunctionFeedbackCell ( ValueNode closure)
private

Definition at line 5621 of file maglev-graph-builder.cc.

5622  {
5623  DCHECK(NodeTypeIs(GetType(closure), NodeType::kJSFunction));
5624  if (auto constant = closure->TryGetConstant(broker())) {
5625  if (constant->IsJSFunction()) {
5626  return GetConstant(constant->AsJSFunction().raw_feedback_cell(broker()));
5627  }
5628  }
5629  if (auto fast_closure = closure->TryCast<FastCreateClosure>()) {
5630  return GetConstant(fast_closure->feedback_cell());
5631  }
5632  return BuildLoadTaggedField(closure, JSFunction::kFeedbackCellOffset);
5633 }

References broker(), BuildLoadTaggedField(), v8::internal::DCHECK(), GetConstant(), GetType(), v8::internal::maglev::NodeTypeIs(), v8::internal::maglev::NodeBase::TryCast(), and v8::internal::maglev::ValueNode::TryGetConstant().

+ Here is the call graph for this function:

◆ BuildLoadStringLength()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadStringLength ( ValueNode string)
private

Definition at line 7456 of file maglev-graph-builder.cc.

7456  {
7457  DCHECK(NodeTypeIs(GetType(string), NodeType::kString));
7458  if (auto vo_string = string->TryCast<InlinedAllocation>()) {
7459  if (vo_string->object()->type() == VirtualObject::kConsString) {
7460  return vo_string->object()->string_length();
7461  }
7462  }
7463  if (auto const_string = string->TryGetConstant(broker())) {
7464  if (const_string->IsString()) {
7465  return GetInt32Constant(const_string->AsString().length());
7466  }
7467  }
7468  if (MaybeReduceResult result = TryFindLoadedProperty(
7469  known_node_aspects().loaded_constant_properties, string,
7471  result.IsDone()) {
7472  if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7473  std::cout << " * Reusing constant [String length]"
7474  << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7475  << PrintNode(graph_labeller(), result.value()) << std::endl;
7476  }
7477  return result.value();
7478  }
7479  ValueNode* result = AddNewNode<StringLength>({string});
7480  RecordKnownProperty(string,
7483  return result;
7484 }
template const char * string

References broker(), v8::internal::DCHECK(), v8::internal::compiler::kLoad, v8::internal::maglev::NodeTypeIs(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::base::internal::result, v8::internal::string, v8::anonymous_namespace{api.cc}::StringLength(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::TryFindLoadedProperty(), and v8::internal::v8_flags.

Referenced by TryBuildNewConsString(), and TryBuildPropertyLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLoadTaggedField()

template<typename Instruction = LoadTaggedField, typename... Args>
ValueNode* v8::internal::maglev::MaglevGraphBuilder::BuildLoadTaggedField ( ValueNode object,
uint32_t  offset,
Args &&...  args 
)
inlineprivate

Definition at line 2399 of file maglev-graph-builder.h.

2400  {
2401  if (offset != HeapObject::kMapOffset &&
2403  VirtualObject* vobject =
2404  GetObjectFromAllocation(object->Cast<InlinedAllocation>());
2405  ValueNode* value;
2406  CHECK_NE(vobject->type(), VirtualObject::kHeapNumber);
2407  if (vobject->type() == VirtualObject::kDefault) {
2408  value = vobject->get(offset);
2409  } else {
2410  DCHECK_EQ(vobject->type(), VirtualObject::kFixedDoubleArray);
 2411  // The only offset we're allowed to read from a FixedDoubleArray as a
 2412  // tagged field is the length.
2413  CHECK_EQ(offset, offsetof(FixedDoubleArray, length_));
2414  value = GetInt32Constant(vobject->double_elements_length());
2415  }
2416  if (v8_flags.trace_maglev_object_tracking) {
2417  std::cout << " * Reusing value in virtual object "
2418  << PrintNodeLabel(graph_labeller(), vobject) << "[" << offset
2419  << "]: " << PrintNode(graph_labeller(), value) << std::endl;
2420  }
2421  return value;
2422  }
2423  return AddNewNode<Instruction>({object}, offset,
2424  std::forward<Args>(args)...);
2425  }
#define CHECK_NE(lhs, rhs)

References v8::base::args, v8::internal::maglev::NodeBase::Cast(), CHECK_EQ, CHECK_NE, DCHECK_EQ, v8::internal::maglev::VirtualObject::double_elements_length(), v8::internal::maglev::VirtualObject::get(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::maglev::VirtualObject::type(), v8::internal::v8_flags, and v8::internal::value.

Referenced by BuildLoadField(), BuildLoadFixedArrayLength(), BuildLoadJSFunctionContext(), BuildLoadJSFunctionFeedbackCell(), BuildNewConsStringMap(), TryBuildPropertyCellLoad(), TryBuildPropertyLoad(), TryBuildStoreField(), and TrySpecializeLoadContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLoadTypedArrayElement()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLoadTypedArrayElement ( ValueNode object,
ValueNode index,
ElementsKind  elements_kind 
)
private

Definition at line 6345 of file maglev-graph-builder.cc.

6346  {
6347 #define BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Type) \
6348  return AddNewNode<Load##Type##TypedArrayElement>({object, index}, \
6349  elements_kind);
6350 
6351  switch (elements_kind) {
6352  case INT8_ELEMENTS:
6353  case INT16_ELEMENTS:
6354  case INT32_ELEMENTS:
6357  case UINT8_ELEMENTS:
6358  case UINT16_ELEMENTS:
6359  case UINT32_ELEMENTS:
6360  BUILD_AND_RETURN_LOAD_TYPED_ARRAY(UnsignedInt);
6361  case FLOAT32_ELEMENTS:
6362  case FLOAT64_ELEMENTS:
6364  default:
6365  UNREACHABLE();
6366  }
6367 #undef BUILD_AND_RETURN_LOAD_TYPED_ARRAY
6368 }
#define BUILD_AND_RETURN_LOAD_TYPED_ARRAY(Type)

References BUILD_AND_RETURN_LOAD_TYPED_ARRAY, v8::internal::maglev::FLOAT32_ELEMENTS, v8::internal::maglev::INT16_ELEMENTS, v8::internal::maglev::INT8_ELEMENTS, v8::internal::maglev::UINT16_ELEMENTS, v8::internal::maglev::UINT8_CLAMPED_ELEMENTS, v8::internal::maglev::UINT8_ELEMENTS, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ BuildLoadTypedArrayLength()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildLoadTypedArrayLength ( ValueNode object,
ElementsKind  elements_kind 
)
private

Definition at line 6308 of file maglev-graph-builder.cc.

6309  {
6311  bool is_variable_length = IsRabGsabTypedArrayElementsKind(elements_kind);
6312 
6313  if (!is_variable_length) {
6314  if (auto const_object = object->TryGetConstant(broker())) {
6315  // TODO(marja): Add TryGetConstant<JSTypedArray>().
6316  if (const_object->IsJSTypedArray()) {
6317  auto const_typed_array = const_object->AsJSTypedArray();
6318  if (!const_typed_array.is_on_heap() &&
6320  const_typed_array.elements_kind(broker()))) {
6321  size_t length = const_typed_array.length(broker());
6322  static_assert(ArrayBuffer::kMaxByteLength <=
6323  std::numeric_limits<intptr_t>::max());
6324  return GetIntPtrConstant(static_cast<intptr_t>(length));
6325  }
6326  }
6327  }
6328 
6329  // Note: We can't use broker()->length_string() here, because it could
6330  // conflict with redefinitions of the TypedArray length property.
6332  known_node_aspects().loaded_constant_properties, object,
6334  }
6335 
6336  ValueNode* result = AddNewNode<LoadTypedArrayLength>({object}, elements_kind);
6337  if (!is_variable_length) {
6341  }
6342  return result;
6343 }
static constexpr size_t kMaxByteLength
IntPtrConstant * GetIntPtrConstant(intptr_t constant)
constexpr bool IsRabGsabTypedArrayElementsKind(ElementsKind kind)
constexpr bool IsTypedArrayOrRabGsabTypedArrayElementsKind(ElementsKind kind)

References broker(), v8::internal::DCHECK(), v8::internal::IsRabGsabTypedArrayElementsKind(), v8::internal::IsTypedArrayOrRabGsabTypedArrayElementsKind(), v8::internal::compiler::kLoad, v8::ArrayBuffer::kMaxByteLength, v8::internal::length, v8::base::internal::result, RETURN_IF_DONE, v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::TryFindLoadedProperty(), and v8::internal::maglev::ValueNode::TryGetConstant().

Referenced by TryBuildPropertyLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildLogicalNot()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildLogicalNot ( ValueNode value)
private

Definition at line 8117 of file maglev-graph-builder.cc.

8117  {
8118  // TODO(victorgomes): Use NodeInfo to add more type optimizations here.
8119  switch (value->opcode()) {
8120 #define CASE(Name) \
8121  case Opcode::k##Name: { \
8122  return GetBooleanConstant( \
8123  !value->Cast<Name>()->ToBoolean(local_isolate())); \
8124  }
8126 #undef CASE
8127  default:
8128  return AddNewNode<LogicalNot>({value});
8129  }
8130 }
#define CONSTANT_VALUE_NODE_LIST(V)
Definition: maglev-ir.h:145

References CASE, CONSTANT_VALUE_NODE_LIST, and v8::internal::value.

◆ BuildLoopForPeeling()

void v8::internal::maglev::MaglevGraphBuilder::BuildLoopForPeeling ( )

Definition at line 14300 of file maglev-graph-builder.cc.

14300  {
14301  int loop_header = iterator_.current_offset();
14302  DCHECK(loop_headers_to_peel_.Contains(loop_header));
14303 
14304  // Since peeled loops do not start with a loop merge state, we need to
14305  // explicitly enter a loop effect tracking scope for the peeled iteration.
14306  bool track_peeled_effects =
14307  v8_flags.maglev_optimistic_peeled_loops && peeled_iteration_count_ == 2;
14308  if (track_peeled_effects) {
14309  BeginLoopEffects(loop_header);
14310  }
14311 
14312 #ifdef DEBUG
14313  bool was_in_peeled_iteration = in_peeled_iteration();
14314 #endif // DEBUG
14315 
14316  while (iterator_.current_bytecode() != interpreter::Bytecode::kJumpLoop) {
14319  iterator_.Advance();
14320  }
14321 
14322  VisitSingleBytecode(); // VisitJumpLoop
14323 
14324  DCHECK_EQ(was_in_peeled_iteration, in_peeled_iteration());
14325  if (!in_peeled_iteration()) {
14326  return;
14327  }
14328 
14329  // In case the peeled iteration was mergeable (see TryMergeLoop) or the
14330  // JumpLoop was dead, we are done.
14331  if (!current_block_) {
14335  if (track_peeled_effects) {
14336  EndLoopEffects(loop_header);
14337  }
14338  return;
14339  }
14340 
14342 
14343  // After processing the peeled iteration and reaching the `JumpLoop`, we
14344  // re-process the loop body. For this, we need to reset the graph building
14345  // state roughly as if we didn't process it yet.
14346 
14347  // Reset position in exception handler table to before the loop.
14348  HandlerTable table(*bytecode().object());
14349  while (next_handler_table_index_ > 0) {
14351  int start = table.GetRangeStart(next_handler_table_index_);
14352  if (start < loop_header) break;
14353  }
14354 
14355  // Re-create catch handler merge states.
14356  for (int offset = loop_header; offset <= iterator_.current_offset();
14357  ++offset) {
14358  if (auto& merge_state = merge_states_[offset]) {
14359  if (merge_state->is_exception_handler()) {
14361  *compilation_unit_, merge_state->frame_state().liveness(), offset,
14362  merge_state->exception_handler_was_used(),
14363  merge_state->catch_block_context_register(), graph_);
14364  } else {
14365  // We only peel innermost loops.
14366  DCHECK(!merge_state->is_loop());
14367  merge_state = nullptr;
14368  }
14369  }
14370  new (&jump_targets_[offset]) BasicBlockRef();
14371  }
14372 
14373  // Reset predecessors as if the loop body had not been visited.
14374  for (int offset : decremented_predecessor_offsets_) {
14375  DCHECK_GE(offset, loop_header);
14376  if (offset <= iterator_.current_offset()) {
14377  UpdatePredecessorCount(offset, 1);
14378  }
14379  }
14381 
14383  // After resetting, the actual loop header always has exactly 2
14384  // predecessors: the two copies of `JumpLoop`.
14385  InitializePredecessorCount(loop_header, 2);
14388  GetInLivenessFor(loop_header),
14389  &bytecode_analysis_.GetLoopInfoFor(loop_header),
14390  /* has_been_peeled */ true);
14391 
14392  BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[loop_header]);
14393  // If we ever want more peelings, we should ensure that only the last one
14394  // creates a loop header.
14397  v8_flags.maglev_optimistic_peeled_loops);
14398  merge_states_[loop_header]->InitializeLoop(
14401 
14402  if (track_peeled_effects) {
14403  EndLoopEffects(loop_header);
14404  }
14405  DCHECK_NE(iterator_.current_offset(), loop_header);
14406  iterator_.SetOffset(loop_header);
14407 }
const LoopInfo & GetLoopInfoFor(int header_offset) const
void UpdatePredecessorCount(uint32_t offset, int diff)
const compiler::BytecodeLivenessState * GetInLivenessFor(int offset) const
void InitializePredecessorCount(uint32_t offset, int amount)
static MergePointInterpreterFrameState * NewForLoop(const InterpreterFrameState &start_state, Graph *graph, const MaglevCompilationUnit &info, int merge_offset, int predecessor_count, const compiler::BytecodeLivenessState *liveness, const compiler::LoopInfo *loop_info, bool has_been_peeled=false)
static MergePointInterpreterFrameState * NewForCatchBlock(const MaglevCompilationUnit &unit, const compiler::BytecodeLivenessState *liveness, int handler_offset, bool was_used, interpreter::Register context_register, Graph *graph)
void InitializeLoop(MaglevGraphBuilder *graph_builder, MaglevCompilationUnit &compilation_unit, InterpreterFrameState &unmerged, BasicBlock *predecessor, bool optimistic_initial_state=false, LoopEffects *loop_effects=nullptr)
#define DCHECK_LE(v1, v2)
Definition: logging.h:489
#define DCHECK_GE(v1, v2)
Definition: logging.h:487

References v8::internal::DCHECK(), DCHECK_EQ, DCHECK_GE, DCHECK_IMPLIES, DCHECK_LE, DCHECK_NE, v8::internal::HandlerTable::GetRangeStart(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ BuildMergeStates()

void v8::internal::maglev::MaglevGraphBuilder::BuildMergeStates ( )

Definition at line 1137 of file maglev-graph-builder.cc.

1137  {
1138  auto offset_and_info = bytecode_analysis().GetLoopInfos().begin();
1139  auto end = bytecode_analysis().GetLoopInfos().end();
1140  while (offset_and_info != end && offset_and_info->first < entrypoint_) {
1141  ++offset_and_info;
1142  }
1143  for (; offset_and_info != end; ++offset_and_info) {
1144  int offset = offset_and_info->first;
1145  const compiler::LoopInfo& loop_info = offset_and_info->second;
1146  if (loop_headers_to_peel_.Contains(offset)) {
1147  // Peeled loops are treated like normal merges at first. We will construct
1148  // the proper loop header merge state when reaching the `JumpLoop` of the
1149  // peeled iteration.
1150  continue;
1151  }
1152  const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset);
1153  DCHECK_NULL(merge_states_[offset]);
1154  if (v8_flags.trace_maglev_graph_building) {
1155  std::cout << "- Creating loop merge state at @" << offset << std::endl;
1156  }
1159  predecessor_count(offset), liveness, &loop_info);
1160  }
1161 
1162  if (bytecode().handler_table_size() > 0) {
1163  HandlerTable table(*bytecode().object());
1164  for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
1165  const int offset = table.GetRangeHandler(i);
1166  const bool was_used = table.HandlerWasUsed(i);
1167  const interpreter::Register context_reg(table.GetRangeData(i));
1168  const compiler::BytecodeLivenessState* liveness =
1169  GetInLivenessFor(offset);
1170  DCHECK_EQ(predecessor_count(offset), 0);
1171  DCHECK_NULL(merge_states_[offset]);
1172  if (v8_flags.trace_maglev_graph_building) {
1173  std::cout << "- Creating exception merge state at @" << offset
1174  << (was_used ? "" : " (never used)") << ", context register r"
1175  << context_reg.index() << std::endl;
1176  }
1178  *compilation_unit_, liveness, offset, was_used, context_reg, graph_);
1179  }
1180  }
1181 }
const ZoneMap< int, LoopInfo > & GetLoopInfos() const
const compiler::BytecodeAnalysis & bytecode_analysis() const

References bytecode(), bytecode_analysis(), compilation_unit_, v8::internal::BitVector::Contains(), current_interpreter_frame_, DCHECK_EQ, DCHECK_NULL, v8::internal::compiler::end(), entrypoint_, GetInLivenessFor(), v8::internal::compiler::BytecodeAnalysis::GetLoopInfos(), v8::internal::HandlerTable::GetRangeData(), v8::internal::HandlerTable::GetRangeHandler(), graph_, v8::internal::HandlerTable::HandlerWasUsed(), v8::internal::interpreter::Register::index(), loop_headers_to_peel_, merge_states_, v8::internal::maglev::MergePointInterpreterFrameState::NewForCatchBlock(), v8::internal::maglev::MergePointInterpreterFrameState::NewForLoop(), v8::internal::HandlerTable::NumberOfRangeEntries(), predecessor_count(), and v8::internal::v8_flags.

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildNewConsStringMap()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildNewConsStringMap ( ValueNode left,
ValueNode right 
)
private

Definition at line 2699 of file maglev-graph-builder.cc.

2700  {
2701  struct Result {
2702  bool static_map;
2703  bool is_two_byte;
2704  // The result map if the other map is known to be one byte.
2705  ValueNode* result_map;
2706  };
2707  // If either is a two byte map, then the result is the kConsTwoByteStringMap.
2708  // If both are non-two byte maps, then the result is the
2709  // kConsOneByteStringMap.
2710  auto GetIsTwoByteAndMap = [&](ValueNode* input) -> Result {
2711  if (auto maybe_constant = input->TryGetConstant(broker())) {
2712  bool two_byte = maybe_constant->map(broker()).IsTwoByteStringMap();
2713  return {true, two_byte,
2714  GetRootConstant(two_byte ? RootIndex::kConsTwoByteStringMap
2715  : RootIndex::kConsOneByteStringMap)};
2716  }
2717  switch (input->opcode()) {
2718  case Opcode::kNumberToString:
2719  return {true, false, GetRootConstant(RootIndex::kConsOneByteStringMap)};
2720  case Opcode::kInlinedAllocation: {
2721  VirtualObject* cons = input->Cast<InlinedAllocation>()->object();
2722  if (cons->type() == VirtualObject::kConsString) {
2723  ValueNode* map = cons->cons_string().map;
2724  if (auto cons_map = map->TryGetConstant(broker())) {
2725  return {true, cons_map->AsMap().IsTwoByteStringMap(), map};
2726  }
2727  return {false, false, map};
2728  }
2729  break;
2730  }
2731  default:
2732  break;
2733  }
2734  return {false, false, nullptr};
2735  };
2736 
2737  auto left_info = GetIsTwoByteAndMap(left);
2738  auto right_info = GetIsTwoByteAndMap(right);
2739  if (left_info.static_map) {
2740  if (left_info.is_two_byte) {
2741  return GetRootConstant(RootIndex::kConsTwoByteStringMap);
2742  }
2743  // If left is known non-twobyte, then the result only depends on right.
2744  if (right_info.static_map) {
2745  if (right_info.is_two_byte) {
2746  return GetRootConstant(RootIndex::kConsTwoByteStringMap);
2747  } else {
2748  return GetRootConstant(RootIndex::kConsOneByteStringMap);
2749  }
2750  }
2751  if (right_info.result_map) {
2752  return right_info.result_map;
2753  }
2754  } else if (left_info.result_map) {
2755  // Left is not constant, but we have a value for the map.
2756  // If right is known non-twobyte, then the result only depends on left.
2757  if (right_info.static_map && !right_info.is_two_byte) {
2758  return left_info.result_map;
2759  }
2760  }
2761 
2762  // Since ConsStringMap only cares about the two-byte-ness of its inputs we
2763  // might as well pass the result map instead if we have one.
2764  ValueNode* left_map =
2765  left_info.result_map ? left_info.result_map
2767  ValueNode* right_map =
2768  right_info.result_map
2769  ? right_info.result_map
2771  // Sort inputs for CSE. Move constants to the left since the instruction
2772  // reuses the lhs input.
2773  if (IsConstantNode(right_map->opcode()) ||
2774  (!IsConstantNode(left_map->opcode()) && left > right)) {
2775  std::swap(left, right);
2776  }
2777  // TODO(olivf): Evaluate if using maglev controlflow to select the map could
2778  // be faster here.
2779  return AddNewNode<ConsStringMap>({left_map, right_map});
2780 }

References broker(), BuildLoadTaggedField(), v8::internal::maglev::VirtualObject::cons_string(), GetRootConstant(), v8::internal::maglev::IsConstantNode(), v8::internal::maglev::VirtualObject::kConsString, v8::internal::HeapObject::kMapOffset, v8::internal::maglev::VirtualObject::VirtualConsString::map, v8::internal::maglev::NodeBase::opcode(), v8::internal::maglev::ValueNode::TryGetConstant(), and v8::internal::maglev::VirtualObject::type().

Referenced by TryBuildNewConsString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildNumberOrOddballToFloat64()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildNumberOrOddballToFloat64 ( ValueNode node,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 4543 of file maglev-graph-builder.cc.

4545  {
4546  NodeType old_type;
4547  if (EnsureType(node, allowed_input_type, &old_type)) {
4548  if (old_type == NodeType::kSmi) {
4549  ValueNode* untagged_smi = BuildSmiUntag(node);
4550  return AddNewNode<ChangeInt32ToFloat64>({untagged_smi});
4551  }
4552  return AddNewNode<UncheckedNumberOrOddballToFloat64>({node},
4553  conversion_type);
4554  } else {
4555  return AddNewNode<CheckedNumberOrOddballToFloat64>({node}, conversion_type);
4556  }
4557 }

References BuildSmiUntag(), EnsureType(), and v8::internal::compiler::kSmi.

Referenced by GetFloat64ForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildOrdinaryHasInstance()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildOrdinaryHasInstance ( ValueNode object,
compiler::JSObjectRef  callable,
ValueNode callable_node 
)
private

Definition at line 12712 of file maglev-graph-builder.cc.

12714  {
12716  object, callable, callable_node_if_not_constant));
12717 
12718  return BuildCallBuiltin<Builtin::kOrdinaryHasInstance>(
12719  {callable_node_if_not_constant
12720  ? GetTaggedValue(callable_node_if_not_constant)
12721  : GetConstant(callable),
12722  GetTaggedValue(object)});
12723 }
MaybeReduceResult TryBuildFastOrdinaryHasInstance(ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)

References RETURN_IF_DONE.

◆ BuildRegisterFrameInitialization()

void v8::internal::maglev::MaglevGraphBuilder::BuildRegisterFrameInitialization ( ValueNode context = nullptr,
ValueNode closure = nullptr,
ValueNode new_target = nullptr 
)

Definition at line 1087 of file maglev-graph-builder.cc.

1088  {
1089  if (closure == nullptr &&
1091  compiler::JSFunctionRef function = compiler::MakeRefAssumeMemoryFence(
1092  broker(), broker()->CanonicalPersistentHandle(
1094  closure = GetConstant(function);
1095  context = GetConstant(function.context(broker()));
1096  }
1099 
1100  EnsureType(GetContext(), NodeType::kContext);
1101  EnsureType(GetClosure(), NodeType::kJSFunction);
1102 
1103  interpreter::Register new_target_or_generator_register =
1105 
1106  int register_index = 0;
1107 
1108  if (compilation_unit_->is_osr()) {
1109  for (; register_index < register_count(); register_index++) {
1110  auto val =
1111  AddNewNode<InitialValue>({}, interpreter::Register(register_index));
1112  InitializeRegister(interpreter::Register(register_index), val);
1113  graph_->osr_values().push_back(val);
1114  }
1115  return;
1116  }
1117 
1118  // TODO(leszeks): Don't emit if not needed.
1119  ValueNode* undefined_value = GetRootConstant(RootIndex::kUndefinedValue);
1120  if (new_target_or_generator_register.is_valid()) {
1121  int new_target_index = new_target_or_generator_register.index();
1122  for (; register_index < new_target_index; register_index++) {
1124  undefined_value);
1125  }
1127  new_target_or_generator_register,
1128  new_target ? new_target
1130  register_index++;
1131  }
1132  for (; register_index < register_count(); register_index++) {
1133  InitializeRegister(interpreter::Register(register_index), undefined_value);
1134  }
1135 }
interpreter::Register incoming_new_target_or_generator_register() const
Definition: heap-refs.cc:1595
static constexpr Register current_context()
static constexpr Register function_closure()
ZoneVector< InitialValue * > & osr_values()
Definition: maglev-graph.h:141
void set(interpreter::Register reg, ValueNode *value)
IndirectHandle< JSFunction > toplevel_function() const
void InitializeRegister(interpreter::Register reg, ValueNode *value)
ref_traits< T >::ref_type MakeRefAssumeMemoryFence(JSHeapBroker *broker, Tagged< T > object) requires(is_subtype_v< T
constexpr Register kJavaScriptCallNewTargetRegister
Definition: register-arm.h:322

References broker(), bytecode(), compilation_unit_, v8::internal::interpreter::Register::current_context(), current_interpreter_frame_, EnsureType(), v8::internal::interpreter::Register::function_closure(), GetClosure(), GetConstant(), GetContext(), GetRegisterInput(), GetRootConstant(), graph_, v8::internal::compiler::BytecodeArrayRef::incoming_new_target_or_generator_register(), v8::internal::interpreter::Register::index(), v8::internal::maglev::MaglevCompilationUnit::info(), InitializeRegister(), v8::internal::maglev::MaglevCompilationUnit::is_osr(), v8::internal::interpreter::Register::is_valid(), v8::internal::kJavaScriptCallNewTargetRegister, v8::internal::compiler::MakeRefAssumeMemoryFence(), v8::internal::maglev::Graph::osr_values(), v8::internal::ETWJITInterface::Register(), register_count(), v8::internal::maglev::InterpreterFrameState::set(), v8::internal::maglev::MaglevCompilationInfo::specialize_to_function_context(), and v8::internal::maglev::MaglevCompilationInfo::toplevel_function().

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildSmiUntag()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildSmiUntag ( ValueNode node)
private

Definition at line 4525 of file maglev-graph-builder.cc.

4525  {
4526  // This is called when converting inputs in AddNewNode. We might already have
4527  // an empty type for `node` here. Make sure we don't add unsafe conversion
4528  // nodes in that case by checking for the empty node type explicitly.
4529  // TODO(marja): The checks can be removed after we're able to bail out
4530  // earlier.
4531  if (!IsEmptyNodeType(GetType(node)) && EnsureType(node, NodeType::kSmi)) {
4532  if (SmiValuesAre31Bits()) {
4533  if (auto phi = node->TryCast<Phi>()) {
4534  phi->SetUseRequires31BitValue();
4535  }
4536  }
4537  return AddNewNode<UnsafeSmiUntag>({node});
4538  } else {
4539  return AddNewNode<CheckedSmiUntag>({node});
4540  }
4541 }
constexpr bool SmiValuesAre31Bits()
Definition: v8-internal.h:208

References EnsureType(), GetType(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::compiler::kSmi, v8::internal::SmiValuesAre31Bits(), and v8::internal::maglev::NodeBase::TryCast().

Referenced by BuildNumberOrOddballToFloat64(), GetInt32(), GetInt32ElementIndex(), and GetTruncatedInt32ForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildStoreConstantTypedArrayElement()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreConstantTypedArrayElement ( compiler::JSTypedArrayRef  typed_array,
ValueNode index,
ElementsKind  elements_kind 
)
private

Definition at line 6434 of file maglev-graph-builder.cc.

6436  {
6437 #define BUILD_STORE_CONSTANT_TYPED_ARRAY(Type, value) \
6438  AddNewNode<Store##Type##ConstantTypedArrayElement>( \
6439  {index, (value)}, typed_array, elements_kind);
6440 
6441  // TODO(leszeks): These operations have a deopt loop when the ToNumber
6442  // conversion sees a type other than number or oddball. Turbofan has the same
6443  // deopt loop, but ideally we'd avoid it.
6444  switch (elements_kind) {
6445  case UINT8_CLAMPED_ELEMENTS: {
6448  break;
6449  }
6450  case INT8_ELEMENTS:
6451  case INT16_ELEMENTS:
6452  case INT32_ELEMENTS:
6453  case UINT8_ELEMENTS:
6454  case UINT16_ELEMENTS:
6455  case UINT32_ELEMENTS:
6458  NodeType::kNumberOrOddball,
6459  TaggedToFloat64ConversionType::kNumberOrOddball))
6460  break;
6461  case FLOAT32_ELEMENTS:
6462  case FLOAT64_ELEMENTS:
6465  NodeType::kNumberOrOddball,
6466  TaggedToFloat64ConversionType::kNumberOrOddball))
6467  break;
6468  default:
6469  UNREACHABLE();
6470  }
6471 #undef BUILD_STORE_CONSTANT_TYPED_ARRAY
6472 }
ValueNode * GetAccumulatorTruncatedInt32ForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)
#define BUILD_STORE_CONSTANT_TYPED_ARRAY(Type, value)

References BUILD_STORE_CONSTANT_TYPED_ARRAY, v8::internal::maglev::FLOAT32_ELEMENTS, v8::internal::maglev::INT16_ELEMENTS, v8::internal::maglev::INT8_ELEMENTS, v8::internal::maglev::UINT16_ELEMENTS, v8::internal::maglev::UINT8_CLAMPED_ELEMENTS, v8::internal::maglev::UINT8_ELEMENTS, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ BuildStoreContextSlot()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildStoreContextSlot ( ValueNode context,
size_t  depth,
int  slot_index,
ValueNode value,
ContextMode  context_mode 
)
private

Definition at line 3799 of file maglev-graph-builder.cc.

3801  {
3802  context = GetContextAtDepth(context, depth);
3803  return StoreAndCacheContextSlot(context, slot_index, value, context_mode);
3804 }
ReduceResult StoreAndCacheContextSlot(ValueNode *context, int index, ValueNode *value, ContextMode context_mode)

References GetContextAtDepth(), StoreAndCacheContextSlot(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildStoreFixedArrayElement()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreFixedArrayElement ( ValueNode elements,
ValueNode index,
ValueNode value 
)
private

Definition at line 5338 of file maglev-graph-builder.cc.

5340  {
5341  // TODO(victorgomes): Support storing element to a virtual object. If we
5342  // modify the elements array, we need to modify the original object to point
5343  // to the new elements array.
5344  if (CanElideWriteBarrier(elements, value)) {
5345  AddNewNode<StoreFixedArrayElementNoWriteBarrier>({elements, index, value});
5346  } else {
5347  AddNewNode<StoreFixedArrayElementWithWriteBarrier>(
5348  {elements, index, value});
5349  }
5350 }
bool CanElideWriteBarrier(ValueNode *object, ValueNode *value)

References CanElideWriteBarrier(), v8::internal::index, and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildStoreFixedDoubleArrayElement()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreFixedDoubleArrayElement ( ValueNode elements,
ValueNode index,
ValueNode value 
)
private

Definition at line 5380 of file maglev-graph-builder.cc.

5382  {
5383  // TODO(victorgomes): Support storing double element to a virtual object.
5384  AddNewNode<StoreFixedDoubleArrayElement>({elements, index, value});
5385 }

References v8::internal::index, and v8::internal::value.

◆ BuildStoreMap()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreMap ( ValueNode object,
compiler::MapRef  map,
StoreMap::Kind  kind 
)
private

Definition at line 5648 of file maglev-graph-builder.cc.

5649  {
5650  AddNewNode<StoreMap>({object}, map, kind);
5651  NodeType object_type = StaticTypeForMap(map, broker());
5652  NodeInfo* node_info = GetOrCreateInfoFor(object);
5653  if (map.is_stable()) {
5654  node_info->SetPossibleMaps(PossibleMaps{map}, false, object_type, broker());
5656  } else {
5657  node_info->SetPossibleMaps(PossibleMaps{map}, true, object_type, broker());
5659  }
5660 }

References v8::internal::maglev::KnownNodeAspects::any_map_for_any_node_is_unstable, broker(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), GetOrCreateInfoFor(), v8::internal::compiler::MapRef::is_stable(), known_node_aspects(), v8::internal::maglev::NodeInfo::SetPossibleMaps(), and v8::internal::maglev::StaticTypeForMap().

Referenced by TryBuildStoreField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildStoreTaggedField()

Node * v8::internal::maglev::MaglevGraphBuilder::BuildStoreTaggedField ( ValueNode object,
ValueNode value,
int  offset,
StoreTaggedMode  store_mode 
)
private

Definition at line 5233 of file maglev-graph-builder.cc.

5235  {
5236  // The value may be used to initialize a VO, which can leak to IFS.
5237  // It should NOT be a conversion node, UNLESS it's an initializing value.
5238  // Initializing values are tagged before allocation, since conversion nodes
5239  // may allocate, and are not used to set a VO.
5241  !value->properties().is_conversion());
5242  // TODO(marja): Bail out if `value` has the empty type. This requires that
5243  // BuildInlinedAllocation can bail out.
5244  if (store_mode != StoreTaggedMode::kInitializing) {
5246  }
5247  if (CanElideWriteBarrier(object, value)) {
5248  return AddNewNode<StoreTaggedFieldNoWriteBarrier>({object, value}, offset,
5249  store_mode);
5250  } else {
5251  // Detect stores that would create old-to-new references and pretenure the
5252  // value.
5253  if (v8_flags.maglev_pretenure_store_values) {
5254  if (auto alloc = object->TryCast<InlinedAllocation>()) {
5255  if (alloc->allocation_block()->allocation_type() ==
5257  alloc->allocation_block()->TryPretenure(value);
5258  }
5259  }
5260  }
5261  return AddNewNode<StoreTaggedFieldWithWriteBarrier>({object, value}, offset,
5262  store_mode);
5263  }
5264 }
void TryBuildStoreTaggedFieldToAllocation(ValueNode *object, ValueNode *value, int offset)

References CanElideWriteBarrier(), DCHECK_IMPLIES, v8::internal::maglev::kInitializing, v8::internal::kOld, TryBuildStoreTaggedFieldToAllocation(), v8::internal::maglev::NodeBase::TryCast(), v8::internal::v8_flags, and v8::internal::value.

Referenced by BuildInitializeStore(), BuildStoreTrustedPointerField(), StoreAndCacheContextSlot(), TryBuildPropertyCellStore(), TryBuildStoreField(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildStoreTaggedFieldNoWriteBarrier()

Node * v8::internal::maglev::MaglevGraphBuilder::BuildStoreTaggedFieldNoWriteBarrier ( ValueNode object,
ValueNode value,
int  offset,
StoreTaggedMode  store_mode 
)
private

Definition at line 5266 of file maglev-graph-builder.cc.

5268  {
5269  // The value may be used to initialize a VO, which can leak to IFS.
5270  // It should NOT be a conversion node, UNLESS it's an initializing value.
5271  // Initializing values are tagged before allocation, since conversion nodes
5272  // may allocate, and are not used to set a VO.
5274  !value->properties().is_conversion());
5275  DCHECK(CanElideWriteBarrier(object, value));
5276  if (store_mode != StoreTaggedMode::kInitializing) {
5278  }
5279  return AddNewNode<StoreTaggedFieldNoWriteBarrier>({object, value}, offset,
5280  store_mode);
5281 }

References CanElideWriteBarrier(), v8::internal::DCHECK(), DCHECK_IMPLIES, v8::internal::maglev::kInitializing, TryBuildStoreTaggedFieldToAllocation(), and v8::internal::value.

Referenced by TryBuildStoreField(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildStoreTrustedPointerField()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreTrustedPointerField ( ValueNode object,
ValueNode value,
int  offset,
IndirectPointerTag  tag,
StoreTaggedMode  store_mode 
)
private

Definition at line 5283 of file maglev-graph-builder.cc.

5285  {
5286 #ifdef V8_ENABLE_SANDBOX
5287  AddNewNode<StoreTrustedPointerFieldWithWriteBarrier>({object, value}, offset,
5288  tag, store_mode);
5289 #else
5290  BuildStoreTaggedField(object, value, offset, store_mode);
5291 #endif // V8_ENABLE_SANDBOX
5292 }

References BuildStoreTaggedField(), and v8::internal::value.

Referenced by BuildInitializeStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildStoreTypedArrayElement()

void v8::internal::maglev::MaglevGraphBuilder::BuildStoreTypedArrayElement ( ValueNode object,
ValueNode index,
ElementsKind  elements_kind 
)
private

Definition at line 6396 of file maglev-graph-builder.cc.

6397  {
6398 #define BUILD_STORE_TYPED_ARRAY(Type, value) \
6399  AddNewNode<Store##Type##TypedArrayElement>({object, index, (value)}, \
6400  elements_kind);
6401 
6402  // TODO(leszeks): These operations have a deopt loop when the ToNumber
6403  // conversion sees a type other than number or oddball. Turbofan has the same
6404  // deopt loop, but ideally we'd avoid it.
6405  switch (elements_kind) {
6406  case UINT8_CLAMPED_ELEMENTS: {
6408  break;
6409  }
6410  case INT8_ELEMENTS:
6411  case INT16_ELEMENTS:
6412  case INT32_ELEMENTS:
6413  case UINT8_ELEMENTS:
6414  case UINT16_ELEMENTS:
6415  case UINT32_ELEMENTS:
6418  NodeType::kNumberOrOddball,
6419  TaggedToFloat64ConversionType::kNumberOrOddball))
6420  break;
6421  case FLOAT32_ELEMENTS:
6422  case FLOAT64_ELEMENTS:
6425  NodeType::kNumberOrOddball,
6426  TaggedToFloat64ConversionType::kNumberOrOddball))
6427  break;
6428  default:
6429  UNREACHABLE();
6430  }
6431 #undef BUILD_STORE_TYPED_ARRAY
6432 }
#define BUILD_STORE_TYPED_ARRAY(Type, value)

References BUILD_STORE_TYPED_ARRAY, v8::internal::maglev::FLOAT32_ELEMENTS, v8::internal::maglev::FLOAT64_ELEMENTS, v8::internal::maglev::INT16_ELEMENTS, v8::internal::maglev::INT32_ELEMENTS, v8::internal::maglev::INT8_ELEMENTS, v8::internal::maglev::UINT16_ELEMENTS, v8::internal::maglev::UINT32_ELEMENTS, v8::internal::maglev::UINT8_CLAMPED_ELEMENTS, v8::internal::maglev::UINT8_ELEMENTS, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ BuildStringConcat()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildStringConcat ( ValueNode left,
ValueNode right 
)
private

Definition at line 2913 of file maglev-graph-builder.cc.

2914  {
2915  if (RootConstant* root_constant = left->TryCast<RootConstant>()) {
2916  if (root_constant->index() == RootIndex::kempty_string) {
2918  SetAccumulator(right);
2919  return ReduceResult::Done();
2920  }
2921  }
2922  if (RootConstant* root_constant = right->TryCast<RootConstant>()) {
2923  if (root_constant->index() == RootIndex::kempty_string) {
2925  SetAccumulator(left);
2926  return ReduceResult::Done();
2927  }
2928  }
2932  SetAccumulator);
2933  SetAccumulator(AddNewNode<StringConcat>({left, right}));
2934  return ReduceResult::Done();
2935 }
ReduceResult BuildCheckString(ValueNode *object)
MaybeReduceResult TryBuildNewConsString(ValueNode *left, ValueNode *right, AllocationType allocation_type=AllocationType::kYoung)

References BuildCheckString(), v8::internal::maglev::ReduceResult::Done(), PROCESS_AND_RETURN_IF_DONE, RETURN_IF_ABORT, SetAccumulator(), TryBuildNewConsString(), and v8::internal::maglev::NodeBase::TryCast().

Referenced by VisitBinaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildTaggedEqual() [1/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildTaggedEqual ( ValueNode lhs,
RootIndex  rhs_index 
)
private

Definition at line 3935 of file maglev-graph-builder.cc.

3936  {
3937  return BuildTaggedEqual(lhs, GetRootConstant(rhs_index));
3938 }
ValueNode * BuildTaggedEqual(ValueNode *lhs, ValueNode *rhs)

References BuildTaggedEqual(), and GetRootConstant().

+ Here is the call graph for this function:

◆ BuildTaggedEqual() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildTaggedEqual ( ValueNode lhs,
ValueNode rhs 
)
private

Definition at line 3914 of file maglev-graph-builder.cc.

3915  {
3916  ValueNode* tagged_lhs = GetTaggedValue(lhs);
3917  ValueNode* tagged_rhs = GetTaggedValue(rhs);
3918  if (tagged_lhs == tagged_rhs) {
3919  return GetBooleanConstant(true);
3920  }
3921  if (HaveDisjointTypes(tagged_lhs, tagged_rhs)) {
3922  return GetBooleanConstant(false);
3923  }
3924  // TODO(victorgomes): We could retrieve the HeapObjectRef in Constant and
3925  // compare them.
3926  if (IsConstantNode(tagged_lhs->opcode()) && !tagged_lhs->Is<Constant>() &&
3927  tagged_lhs->opcode() == tagged_rhs->opcode()) {
3928  // Constant nodes are canonicalized, except for the node holding
3929  // HeapObjectRef, so equal constants should have been handled above.
3930  return GetBooleanConstant(false);
3931  }
3932  return AddNewNode<TaggedEqual>({tagged_lhs, tagged_rhs});
3933 }
bool HaveDisjointTypes(ValueNode *lhs, ValueNode *rhs)

References GetBooleanConstant(), GetTaggedValue(), HaveDisjointTypes(), v8::internal::maglev::NodeBase::Is(), v8::internal::maglev::IsConstantNode(), and v8::internal::maglev::NodeBase::opcode().

Referenced by BuildTaggedEqual(), TryReduceCompareEqualAgainstConstant(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildTestUndetectable()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildTestUndetectable ( ValueNode value)
private

Definition at line 3947 of file maglev-graph-builder.cc.

3947  {
3948  if (value->properties().value_representation() ==
3950  return AddNewNode<HoleyFloat64IsHole>({value});
3951  } else if (value->properties().value_representation() !=
3953  return GetBooleanConstant(false);
3954  }
3955 
3956  if (auto maybe_constant = TryGetConstant(value)) {
3957  auto map = maybe_constant.value().map(broker());
3958  return GetBooleanConstant(map.is_undetectable());
3959  }
3960 
3961  NodeType node_type;
3962  if (CheckType(value, NodeType::kSmi, &node_type)) {
3963  return GetBooleanConstant(false);
3964  }
3965 
3966  auto it = known_node_aspects().FindInfo(value);
3967  if (known_node_aspects().IsValid(it)) {
3968  NodeInfo& info = it->second;
3969  if (info.possible_maps_are_known()) {
3970  // We check if all the possible maps have the same undetectable bit value.
3971  DCHECK_GT(info.possible_maps().size(), 0);
3972  bool first_is_undetectable = info.possible_maps()[0].is_undetectable();
3973  bool all_the_same_value =
3974  std::all_of(info.possible_maps().begin(), info.possible_maps().end(),
3975  [first_is_undetectable](compiler::MapRef map) {
3976  bool is_undetectable = map.is_undetectable();
3977  return (first_is_undetectable && is_undetectable) ||
3978  (!first_is_undetectable && !is_undetectable);
3979  });
3980  if (all_the_same_value) {
3981  return GetBooleanConstant(first_is_undetectable);
3982  }
3983  }
3984  }
3985 
3986  enum CheckType type = GetCheckType(node_type);
3987  return AddNewNode<TestUndetectable>({value}, type);
3988 }
bool all_of(const C &container, const P &predicate)
NodeInfos::iterator FindInfo(ValueNode *node)

References v8::base::all_of(), v8::internal::ZoneCompactSet< T >::begin(), broker(), CheckType(), DCHECK_GT, v8::internal::ZoneCompactSet< T >::end(), v8::internal::maglev::KnownNodeAspects::FindInfo(), GetBooleanConstant(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), v8::internal::maglev::kHoleyFloat64, known_node_aspects(), v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), v8::internal::ZoneCompactSet< T >::size(), TryGetConstant(), v8::internal::tracing::type, and v8::internal::value.

Referenced by BuildBranchIfUndetectable().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildToBoolean()

template<bool flip>
ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildToBoolean ( ValueNode node)
private

Definition at line 12846 of file maglev-graph-builder.cc.

12846  {
12847  if (IsConstantNode(value->opcode())) {
12849  flip);
12850  }
12851 
12852  switch (value->value_representation()) {
12855  // The ToBoolean of both the_hole and NaN is false, so we can use the
12856  // same operation for HoleyFloat64 and Float64.
12857  return AddNewNode<Float64ToBoolean>({value}, flip);
12858 
12860  // Uint32 has the same logic as Int32 when converting ToBoolean, namely
12861  // comparison against zero, so we can cast it and ignore the signedness.
12862  value = AddNewNode<TruncateUint32ToInt32>({value});
12863  [[fallthrough]];
12865  return AddNewNode<Int32ToBoolean>({value}, flip);
12866 
12868  return AddNewNode<IntPtrToBoolean>({value}, flip);
12869 
12871  break;
12872  }
12873 
12874  NodeInfo* node_info = known_node_aspects().TryGetInfoFor(value);
12875  if (node_info) {
12876  if (ValueNode* as_int32 = node_info->alternative().int32()) {
12877  return AddNewNode<Int32ToBoolean>({as_int32}, flip);
12878  }
12879  if (ValueNode* as_float64 = node_info->alternative().float64()) {
12880  return AddNewNode<Float64ToBoolean>({as_float64}, flip);
12881  }
12882  }
12883 
12885  if (CheckType(value, NodeType::kJSReceiver, &value_type)) {
12886  ValueNode* result = BuildTestUndetectable(value);
12887  // TODO(victorgomes): Check if it is worth to create
12888  // TestUndetectableLogicalNot or to remove ToBooleanLogicalNot, since we
12889  // already optimize LogicalNots by swapping the branches.
12890  if constexpr (!flip) {
12892  }
12893  return result;
12894  }
12895  ValueNode* falsy_value = nullptr;
12896  if (CheckType(value, NodeType::kString)) {
12897  falsy_value = GetRootConstant(RootIndex::kempty_string);
12898  } else if (CheckType(value, NodeType::kSmi)) {
12899  falsy_value = GetSmiConstant(0);
12900  }
12901  if (falsy_value != nullptr) {
12902  return AddNewNode<std::conditional_t<flip, TaggedEqual, TaggedNotEqual>>(
12903  {value, falsy_value});
12904  }
12905  if (CheckType(value, NodeType::kBoolean)) {
12906  if constexpr (flip) {
12908  }
12909  return value;
12910  }
12911  return AddNewNode<std::conditional_t<flip, ToBooleanLogicalNot, ToBoolean>>(
12913 }
ValueNode * BuildLogicalNot(ValueNode *value)
static ValueType value_type()

References v8::internal::maglev::NodeInfo::alternative(), v8::internal::maglev::FromConstantToBool(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), v8::internal::maglev::IsConstantNode(), v8::internal::anonymous_namespace{ic.cc}::kIntPtr, v8::internal::compiler::kSmi, v8::base::internal::result, v8::internal::value, and v8::internal::wasm::value_type().

+ Here is the call graph for this function:

◆ BuildToNumberOrToNumeric()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildToNumberOrToNumeric ( Object::Conversion  mode)
private

Definition at line 12995 of file maglev-graph-builder.cc.

12996  {
12997  ValueNode* value = GetAccumulator();
12998  switch (value->value_representation()) {
13003  return ReduceResult::Done();
13004 
13006  SetAccumulator(AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value}));
13007  return ReduceResult::Done();
13008  }
13009 
13011  // We'll insert the required checks depending on the feedback.
13012  break;
13013  }
13014 
13015  FeedbackSlot slot = GetSlotOperand(0);
13016  switch (broker()->GetFeedbackForBinaryOperation(
13017  compiler::FeedbackSource(feedback(), slot))) {
13020  break;
13023  UNREACHABLE();
13028  EnsureType(value, NodeType::kNumber)) {
13029  return ReduceResult::Done();
13030  }
13031  AddNewNode<CheckNumber>({value}, mode);
13032  break;
13034  // TODO(leszeks): Faster ToNumber for kNumberOrOddball
13039  if (CheckType(value, NodeType::kNumber)) return ReduceResult::Done();
13040  SetAccumulator(AddNewNode<ToNumberOrNumeric>({value}, mode));
13041  break;
13042  }
13043  return ReduceResult::Done();
13044 }
ReduceResult BuildCheckSmi(ValueNode *object, bool elidable=true)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enables Turboshaft s StaticAssert and CheckTurboshaftTypeOf operations Wasm code into JS functions via the JS to Wasm wrappers are still inlined in TurboFan For controlling whether to at see turbo inline js wasm calls enable Turboshaft s loop unrolling enable an additional Turboshaft phase that performs optimizations based on type information enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps trace Turboshaft s if else to switch reducer invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the preconfigured old space Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking 
schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often Release pooled large pages after X seconds prints number of allocations and enables analysis mode for gc fuzz e g stress stress scavenge force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible discard the memory pool before invoking the GC on memory pressure or last resort GCs Delay before memory reducer start virtual randomize memory reservations by ignoring any hints passed when allocating pages use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of SAHF instruction if enable use of AVX VNNI instructions if 
available enable use of POPCNT instruction if available force all emitted branches to be in long mode(MIPS/PPC only)") DEFINE_BOOL(partial_constant_pool

References broker(), v8::internal::kAdditiveSafeInteger, v8::internal::kAny, v8::internal::kBigInt, v8::internal::kBigInt64, v8::internal::anonymous_namespace{ic.cc}::kIntPtr, v8::internal::kNone, v8::internal::kNumber, v8::internal::kNumberOrOddball, v8::internal::kSignedSmall, v8::internal::kSignedSmallInputs, v8::internal::kString, v8::internal::kStringOrStringWrapper, v8::internal::Object::kToNumber, mode(), RETURN_IF_ABORT, v8::internal::UNREACHABLE(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildToString()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildToString ( ValueNode value,
ToString::ConversionMode  mode 
)
private

Definition at line 12984 of file maglev-graph-builder.cc.

12985  {
12986  if (CheckType(value, NodeType::kString)) return value;
12987  // TODO(victorgomes): Add fast path for constant primitives.
12988  if (CheckType(value, NodeType::kNumber)) {
12989  // TODO(verwaest): Float64ToString if float.
12990  return AddNewNode<NumberToString>({value});
12991  }
12992  return AddNewNode<ToString>({GetContext(), value}, mode);
12993 }

References mode(), and v8::internal::value.

+ Here is the call graph for this function:

◆ BuildTransitionElementsKindAndCompareMaps()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildTransitionElementsKindAndCompareMaps ( ValueNode heap_object,
ValueNode object_map,
const ZoneVector< compiler::MapRef > &  transition_sources,
compiler::MapRef  transition_target,
MaglevSubGraphBuilder sub_graph,
std::optional< MaglevSubGraphBuilder::Label > &  if_not_matched 
)
private

Definition at line 5011 of file maglev-graph-builder.cc.

5015  {
5016  DCHECK(!transition_target.is_migration_target());
5017 
5018  NodeInfo* known_info = GetOrCreateInfoFor(heap_object);
5019 
5020  // TODO(pthier): Calculate and use the intersection of known maps with
5021  // (transition_sources union transition_target).
5022 
5023  ValueNode* new_map = AddNewNode<TransitionElementsKind>(
5024  {heap_object, object_map}, transition_sources, transition_target);
5025 
5026  // TODO(pthier): Support map packing.
5028  if_not_matched.emplace(sub_graph, 1);
5029  sub_graph->GotoIfFalse<BranchIfReferenceEqual>(
5030  &*if_not_matched, {new_map, GetConstant(transition_target)});
5031  // After the branch, object's map is transition_target.
5032  DCHECK(transition_target.IsJSReceiverMap());
5033  known_info->SetPossibleMaps(
5034  PossibleMaps{transition_target}, !transition_target.is_stable(),
5035  StaticTypeForMap(transition_target, broker()), broker());
5036  if (!transition_target.is_stable()) {
5038  } else {
5039  broker()->dependencies()->DependOnStableMap(transition_target);
5040  }
5041  return ReduceResult::Done();
5042 }

References v8::internal::maglev::KnownNodeAspects::any_map_for_any_node_is_unstable, broker(), v8::internal::DCHECK(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), v8::internal::maglev::ReduceResult::Done(), GetConstant(), GetOrCreateInfoFor(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfFalse(), v8::internal::compiler::MapRef::is_migration_target(), v8::internal::compiler::MapRef::is_stable(), known_node_aspects(), v8::internal::maglev::NodeInfo::SetPossibleMaps(), v8::internal::maglev::StaticTypeForMap(), and V8_MAP_PACKING_BOOL.

+ Here is the call graph for this function:

◆ BuildTransitionElementsKindOrCheckMap()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildTransitionElementsKindOrCheckMap ( ValueNode heap_object,
ValueNode object_map,
const ZoneVector< compiler::MapRef > &  transition_sources,
compiler::MapRef  transition_target 
)
private

Definition at line 4942 of file maglev-graph-builder.cc.

4945  {
4946  // TODO(marja): Optimizations based on what we know about the intersection of
4947  // known maps and transition sources or transition target.
4948 
4949  // TransitionElementsKind doesn't happen in cases where we'd need to do
4950  // CheckMapsWithMigration instead of CheckMaps.
4951  CHECK(!transition_target.is_migration_target());
4952  for (const compiler::MapRef transition_source : transition_sources) {
4953  CHECK(!transition_source.is_migration_target());
4954  }
4955 
4956  NodeInfo* known_info = GetOrCreateInfoFor(heap_object);
4957 
4958  AddNewNode<TransitionElementsKindOrCheckMap>(
4959  {heap_object, object_map}, transition_sources, transition_target);
4960  // After this operation, heap_object's map is transition_target (or we
4961  // deopted).
4962  known_info->SetPossibleMaps(
4963  PossibleMaps{transition_target}, !transition_target.is_stable(),
4964  StaticTypeForMap(transition_target, broker()), broker());
4965  DCHECK(transition_target.IsJSReceiverMap());
4966  if (!transition_target.is_stable()) {
4968  } else {
4969  broker()->dependencies()->DependOnStableMap(transition_target);
4970  }
4971  return ReduceResult::Done();
4972 }

References v8::internal::maglev::KnownNodeAspects::any_map_for_any_node_is_unstable, broker(), CHECK, v8::internal::DCHECK(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), v8::internal::maglev::ReduceResult::Done(), GetOrCreateInfoFor(), v8::internal::compiler::MapRef::is_migration_target(), v8::internal::compiler::MapRef::is_stable(), known_node_aspects(), v8::internal::maglev::NodeInfo::SetPossibleMaps(), and v8::internal::maglev::StaticTypeForMap().

+ Here is the call graph for this function:

◆ BuildTruncatingInt32BinaryOperationNodeForToNumber()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildTruncatingInt32BinaryOperationNodeForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2439 of file maglev-graph-builder.cc.

2441  {
2442  static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
2443  ValueNode* left;
2444  ValueNode* right;
2446  left = right = GetTruncatedInt32ForToNumber(
2448  allowed_input_type, conversion_type);
2449  } else {
2452  allowed_input_type, conversion_type);
2453  right =
2455  allowed_input_type, conversion_type);
2456  }
2458  TryFoldInt32BinaryOperation<kOperation>(left, right), SetAccumulator);
2459  SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
2460  return ReduceResult::Done();
2461 }
ValueNode * get(interpreter::Register reg) const
ValueNode * GetTruncatedInt32ForToNumber(ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References v8::internal::maglev::InterpreterFrameState::accumulator(), AddNewNode(), current_interpreter_frame_, v8::internal::maglev::ReduceResult::Done(), v8::internal::maglev::InterpreterFrameState::get(), v8::internal::interpreter::BytecodeArrayIterator::GetRegisterOperand(), GetTruncatedInt32ForToNumber(), IsRegisterEqualToAccumulator(), iterator_, PROCESS_AND_RETURN_IF_DONE, and SetAccumulator().

+ Here is the call graph for this function:

◆ BuildTruncatingInt32BinarySmiOperationNodeForToNumber()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildTruncatingInt32BinarySmiOperationNodeForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2488 of file maglev-graph-builder.cc.

2490  {
2491  static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
2492  ValueNode* left =
2494  allowed_input_type, conversion_type);
2495  int32_t constant = iterator_.GetImmediateOperand(0);
2496  if (std::optional<int>(constant) == Int32Identity<kOperation>()) {
2497  // If the constant is the unit of the operation, it already has the right
2498  // value, so use the truncated value (if not just a conversion) and return.
2499  if (!left->properties().is_conversion()) {
2501  }
2502  return ReduceResult::Done();
2503  }
2505  TryFoldInt32BinaryOperation<kOperation>(left, constant), SetAccumulator);
2506  ValueNode* right = GetInt32Constant(constant);
2507  SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
2508  return ReduceResult::Done();
2509 }

References v8::internal::maglev::InterpreterFrameState::accumulator(), AddNewNode(), current_interpreter_frame_, v8::internal::maglev::ReduceResult::Done(), v8::internal::interpreter::BytecodeArrayIterator::GetImmediateOperand(), GetInt32Constant(), GetTruncatedInt32ForToNumber(), v8::internal::maglev::OpProperties::is_conversion(), iterator_, PROCESS_AND_RETURN_IF_DONE, v8::internal::maglev::NodeBase::properties(), v8::internal::maglev::InterpreterFrameState::set_accumulator(), and SetAccumulator().

Referenced by VisitBinarySmiOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildTruncatingInt32BitwiseNotForToNumber()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::BuildTruncatingInt32BitwiseNotForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2346 of file maglev-graph-builder.cc.

2348  {
2349  ValueNode* value =
2351  allowed_input_type, conversion_type);
2353  TryFoldInt32UnaryOperation<Operation::kBitwiseNot>(value),
2354  SetAccumulator);
2355  SetAccumulator(AddNewNode<Int32BitwiseNot>({value}));
2356  return ReduceResult::Done();
2357 }

References v8::internal::maglev::InterpreterFrameState::accumulator(), current_interpreter_frame_, v8::internal::maglev::ReduceResult::Done(), GetTruncatedInt32ForToNumber(), PROCESS_AND_RETURN_IF_DONE, SetAccumulator(), and v8::internal::value.

Referenced by VisitUnaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildUnwrapStringWrapper()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::BuildUnwrapStringWrapper ( ValueNode input)
private

Definition at line 2907 of file maglev-graph-builder.cc.

2907  {
2908  DCHECK(NodeTypeIs(GetType(input), NodeType::kStringOrStringWrapper));
2909  if (NodeTypeIs(GetType(input), NodeType::kString)) return input;
2910  return AddNewNode<UnwrapStringWrapper>({input});
2911 }

References v8::internal::DCHECK(), GetType(), and v8::internal::maglev::NodeTypeIs().

Referenced by VisitBinaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ BuildVirtualArgumentsObject()

template<CreateArgumentsType type>
VirtualObject * v8::internal::maglev::MaglevGraphBuilder::BuildVirtualArgumentsObject
private

Definition at line 13845 of file maglev-graph-builder.cc.

13845  {
13846  switch (type) {
13848  if (parameter_count_without_receiver() == 0) {
13849  // If there is no aliasing, the arguments object elements are not
13850  // special in any way, so we can just return an unmapped backing store.
13851  if (is_inline()) {
13853  ValueNode* elements = BuildInlinedArgumentsElements(0, length);
13854  return CreateArgumentsObject(
13855  broker()->target_native_context().sloppy_arguments_map(broker()),
13856  GetInt32Constant(length), elements, GetClosure());
13857  } else {
13858  ArgumentsLength* length = AddNewNode<ArgumentsLength>({});
13860  ArgumentsElements* elements = AddNewNode<ArgumentsElements>(
13863  return CreateArgumentsObject(
13864  broker()->target_native_context().sloppy_arguments_map(broker()),
13865  length, elements, GetClosure());
13866  }
13867  } else {
13868  // If the parameter count is zero, we should have used the unmapped
13869  // backing store.
13870  int param_count = parameter_count_without_receiver();
13871  DCHECK_GT(param_count, 0);
13873  int param_idx_in_ctxt = compilation_unit_->shared_function_info()
13875  param_count - 1;
13876  // The {unmapped_elements} correspond to the extra arguments
13877  // (overapplication) that do not need to be "mapped" to the actual
13878  // arguments. Mapped arguments are accessed via the context, whereas
13879  // unmapped arguments are simply accessed via this fixed array. See
13880  // SloppyArgumentsElements in src/objects/arguments.h.
13881  if (is_inline()) {
13883  int mapped_count = std::min(param_count, length);
13884  ValueNode* unmapped_elements =
13886  VirtualObject* elements = CreateMappedArgumentsElements(
13887  broker()->sloppy_arguments_elements_map(), mapped_count,
13888  GetContext(), unmapped_elements);
13889  for (int i = 0; i < mapped_count; i++, param_idx_in_ctxt--) {
13891  GetInt32Constant(param_idx_in_ctxt));
13892  }
13893  return CreateArgumentsObject(
13894  broker()->target_native_context().fast_aliased_arguments_map(
13895  broker()),
13896  GetInt32Constant(length), elements, GetClosure());
13897  } else {
13898  ArgumentsLength* length = AddNewNode<ArgumentsLength>({});
13900  ArgumentsElements* unmapped_elements = AddNewNode<ArgumentsElements>(
13902  VirtualObject* elements = CreateMappedArgumentsElements(
13903  broker()->sloppy_arguments_elements_map(), param_count,
13904  GetContext(), unmapped_elements);
13905  ValueNode* the_hole_value = GetConstant(broker()->the_hole_value());
13906  for (int i = 0; i < param_count; i++, param_idx_in_ctxt--) {
13907  ValueNode* value = Select(
13908  [&](auto& builder) {
13909  return BuildBranchIfInt32Compare(builder,
13912  },
13913  [&] { return GetSmiConstant(param_idx_in_ctxt); },
13914  [&] { return the_hole_value; });
13916  }
13917  return CreateArgumentsObject(
13918  broker()->target_native_context().fast_aliased_arguments_map(
13919  broker()),
13920  length, elements, GetClosure());
13921  }
13922  }
13924  if (is_inline()) {
13926  ValueNode* elements = BuildInlinedArgumentsElements(0, length);
13927  return CreateArgumentsObject(
13928  broker()->target_native_context().strict_arguments_map(broker()),
13929  GetInt32Constant(length), elements);
13930  } else {
13931  ArgumentsLength* length = AddNewNode<ArgumentsLength>({});
13933  ArgumentsElements* elements = AddNewNode<ArgumentsElements>(
13936  return CreateArgumentsObject(
13937  broker()->target_native_context().strict_arguments_map(broker()),
13938  length, elements);
13939  }
13941  if (is_inline()) {
13942  int start_index = parameter_count_without_receiver();
13943  int length =
13944  std::max(0, argument_count_without_receiver() - start_index);
13945  ValueNode* elements =
13946  BuildInlinedArgumentsElements(start_index, length);
13947  return CreateArgumentsObject(
13948  broker()->target_native_context().js_array_packed_elements_map(
13949  broker()),
13950  GetInt32Constant(length), elements);
13951  } else {
13952  ArgumentsLength* length = AddNewNode<ArgumentsLength>({});
13954  ArgumentsElements* elements = AddNewNode<ArgumentsElements>(
13957  RestLength* rest_length =
13958  AddNewNode<RestLength>({}, parameter_count_without_receiver());
13959  return CreateArgumentsObject(
13960  broker()->target_native_context().js_array_packed_elements_map(
13961  broker()),
13962  rest_length, elements);
13963  }
13964  }
13965 }
ValueNode * Select(FCond cond, FTrue if_true, FFalse if_false)
VirtualObject * CreateMappedArgumentsElements(compiler::MapRef map, int mapped_count, ValueNode *context, ValueNode *unmapped_elements)
ValueNode * BuildInlinedArgumentsElements(int start_index, int length)
ValueNode * BuildInlinedUnmappedArgumentsElements(int mapped_count)
BranchResult BuildBranchIfInt32Compare(BranchBuilder &builder, Operation op, ValueNode *lhs, ValueNode *rhs)
VirtualObject * CreateArgumentsObject(compiler::MapRef map, ValueNode *length, ValueNode *elements, std::optional< ValueNode * > callee={})

References broker(), v8::internal::DCHECK(), DCHECK_GT, v8::internal::kLessThan, v8::internal::kMappedArguments, v8::internal::kRestParameter, v8::internal::compiler::kSmi, v8::internal::kUnmappedArguments, v8::internal::length, v8::internal::TaggedArrayBase< SloppyArgumentsElements, SloppyArgumentsElementsShape >::OffsetOfElementAt(), v8::internal::maglev::VirtualObject::set(), v8::internal::tracing::type, and v8::internal::value.

+ Here is the call graph for this function:

◆ bytecode()

compiler::BytecodeArrayRef v8::internal::maglev::MaglevGraphBuilder::bytecode ( ) const
inlineprivate

Definition at line 3121 of file maglev-graph-builder.h.

3121  {
3122  return compilation_unit_->bytecode();
3123  }

Referenced by BuildMergeStates(), BuildRegisterFrameInitialization(), GetResultLocationAndSize(), KillPeeledLoopTargets(), MaglevGraphBuilder(), MarkBytecodeDead(), and VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ bytecode_analysis()

const compiler::BytecodeAnalysis& v8::internal::maglev::MaglevGraphBuilder::bytecode_analysis ( ) const
inlineprivate

Definition at line 3124 of file maglev-graph-builder.h.

3124  {
3125  return bytecode_analysis_;
3126  }

Referenced by BuildMergeStates(), and VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ CalculatePredecessorCounts()

void v8::internal::maglev::MaglevGraphBuilder::CalculatePredecessorCounts ( )
inlineprivate

Definition at line 3022 of file maglev-graph-builder.h.

3022  {
3023  // Add 1 after the end of the bytecode so we can always write to the offset
3024  // after the last bytecode.
3025  uint32_t array_length = bytecode().length() + 1;
3026  predecessor_count_ = zone()->AllocateArray<uint32_t>(array_length);
3029  array_length - entrypoint_);
3030 
3031  const int max_peelings = v8_flags.maglev_optimistic_peeled_loops ? 2 : 1;
3032  // We count jumps from peeled loops to outside of the loop twice.
3033  bool is_loop_peeling_iteration = false;
3034  std::optional<int> peeled_loop_end;
3035  interpreter::BytecodeArrayIterator iterator(bytecode().object());
3036  for (iterator.AdvanceTo(entrypoint_); !iterator.done();
3037  iterator.Advance()) {
3038  interpreter::Bytecode bytecode = iterator.current_bytecode();
3039  if (allow_loop_peeling_ &&
3040  bytecode_analysis().IsLoopHeader(iterator.current_offset())) {
3041  const compiler::LoopInfo& loop_info =
3042  bytecode_analysis().GetLoopInfoFor(iterator.current_offset());
3043  // Generators use irreducible control flow, which makes loop peeling too
3044  // complicated.
3045  int size = loop_info.loop_end() - loop_info.loop_start();
3046  if (loop_info.innermost() && !loop_info.resumable() &&
3047  iterator.next_offset() < loop_info.loop_end() &&
3048  size < v8_flags.maglev_loop_peeling_max_size &&
3050  v8_flags.maglev_loop_peeling_max_size_cumulative) {
3051  DCHECK(!is_loop_peeling_iteration);
3053  is_loop_peeling_iteration = true;
3054  loop_headers_to_peel_.Add(iterator.current_offset());
3055  peeled_loop_end = bytecode_analysis().GetLoopEndOffsetForInnermost(
3056  iterator.current_offset());
3057  }
3058  }
3060  if (is_loop_peeling_iteration &&
3061  bytecode == interpreter::Bytecode::kJumpLoop) {
3062  DCHECK_EQ(iterator.next_offset(), peeled_loop_end);
3063  is_loop_peeling_iteration = false;
3064  peeled_loop_end = {};
3065  }
3066  if (iterator.GetJumpTargetOffset() < entrypoint_) {
3067  static_assert(kLoopsMustBeEnteredThroughHeader);
3068  if (predecessor_count(iterator.GetJumpTargetOffset()) == 1) {
3069  // We encountered a JumpLoop whose loop header is not reachable
3070  // otherwise. This loop is either dead or the JumpLoop will bail
3071  // with DeoptimizeReason::kOSREarlyExit.
3072  InitializePredecessorCount(iterator.GetJumpTargetOffset(), 0);
3073  }
3074  } else {
3075  UpdatePredecessorCount(iterator.GetJumpTargetOffset(), 1);
3076  }
3077  if (is_loop_peeling_iteration &&
3078  iterator.GetJumpTargetOffset() >= *peeled_loop_end) {
3079  // Jumps from within the peeled loop to outside need to be counted
3080  // twice, once for the peeled and once for the regular loop body.
3081  UpdatePredecessorCount(iterator.GetJumpTargetOffset(), max_peelings);
3082  }
3084  UpdatePredecessorCount(iterator.next_offset(), -1);
3085  }
3087  for (auto offset : iterator.GetJumpTableTargetOffsets()) {
3088  UpdatePredecessorCount(offset.target_offset, 1);
3089  }
3092  UpdatePredecessorCount(iterator.next_offset(), -1);
3093  // Collect inline return jumps in the slot after the last bytecode.
3095  UpdatePredecessorCount(array_length - 1, 1);
3096  if (is_loop_peeling_iteration) {
3097  UpdatePredecessorCount(array_length - 1, max_peelings);
3098  }
3099  }
3100  }
3101  // TODO(leszeks): Also consider handler entries (the bytecode analysis)
3102  // will do this automatically I guess if we merge this into that.
3103  }
3104  if (!is_inline()) {
3106  }
3107  }
T * AllocateArray(size_t length)
Definition: zone.h:121
int GetLoopEndOffsetForInnermost(int header_offset) const
static constexpr bool UnconditionallyThrows(Bytecode bytecode)
Definition: bytecodes.h:890
static constexpr bool Returns(Bytecode bytecode)
Definition: bytecodes.h:883
static constexpr bool IsSwitch(Bytecode bytecode)
Definition: bytecodes.h:830
static constexpr bool IsConditionalJump(Bytecode bytecode)
Definition: bytecodes.h:775
static constexpr bool IsJump(Bytecode bytecode)
Definition: bytecodes.h:809
void add_peeled_bytecode_size(int size)
Definition: maglev-graph.h:120
int total_peeled_bytecode_size() const
Definition: maglev-graph.h:119
void MemsetUint32(uint32_t *dest, uint32_t value, size_t counter)
Definition: memcopy.h:267

References v8::internal::interpreter::BytecodeArrayIterator::Advance(), v8::internal::interpreter::BytecodeArrayIterator::AdvanceTo(), v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), v8::internal::interpreter::BytecodeArrayIterator::current_offset(), DCHECK, DCHECK_EQ, v8::internal::interpreter::BytecodeArrayIterator::done(), v8::internal::interpreter::BytecodeArrayIterator::GetJumpTableTargetOffsets(), v8::internal::interpreter::BytecodeArrayIterator::GetJumpTargetOffset(), v8::internal::compiler::LoopInfo::innermost(), v8::internal::length, v8::internal::compiler::LoopInfo::loop_end(), v8::internal::compiler::LoopInfo::loop_start(), v8::internal::MemsetUint32(), v8::internal::interpreter::BytecodeArrayIterator::next_offset(), v8::internal::compiler::LoopInfo::resumable(), size(), and v8::internal::v8_flags.

Referenced by MaglevGraphBuilder().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ caller_details()

MaglevCallerDetails* v8::internal::maglev::MaglevGraphBuilder::caller_details ( ) const
inline

Definition at line 362 of file maglev-graph-builder.h.

362 { return caller_details_; }

References caller_details_.

Referenced by MaglevGraphBuilder().

+ Here is the caller graph for this function:

◆ CanAllocateInlinedArgumentElements()

bool v8::internal::maglev::MaglevGraphBuilder::CanAllocateInlinedArgumentElements ( )
private

Definition at line 14207 of file maglev-graph-builder.cc.

14207  {
14208  DCHECK(is_inline());
14211 }
constexpr int kMaxRegularHeapObjectSize
Definition: globals.h:671

References v8::internal::DCHECK(), v8::internal::kMaxRegularHeapObjectSize, and v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::SizeFor().

+ Here is the call graph for this function:

◆ CanAllocateSloppyArgumentElements()

bool v8::internal::maglev::MaglevGraphBuilder::CanAllocateSloppyArgumentElements ( )
private

Definition at line 14202 of file maglev-graph-builder.cc.

14202  {
14205 }

References v8::internal::kMaxRegularHeapObjectSize, and v8::internal::TaggedArrayBase< SloppyArgumentsElements, SloppyArgumentsElementsShape >::SizeFor().

+ Here is the call graph for this function:

◆ CanElideWriteBarrier()

bool v8::internal::maglev::MaglevGraphBuilder::CanElideWriteBarrier ( ValueNode object,
ValueNode value 
)
private

Definition at line 5056 of file maglev-graph-builder.cc.

5057  {
5058  if (value->Is<RootConstant>() || value->Is<ConsStringMap>()) return true;
5061  return true;
5062  }
5063 
5064  // No need for a write barrier if both object and value are part of the same
5065  // folded young allocation.
5066  AllocationBlock* allocation = GetAllocation(object);
5067  if (allocation != nullptr && current_allocation_block_ == allocation &&
5068  allocation->allocation_type() == AllocationType::kYoung &&
5069  allocation == GetAllocation(value)) {
5070  allocation->set_elided_write_barriers_depend_on_type();
5071  return true;
5072  }
5073 
5074  // If tagged and not Smi, we cannot elide write barrier.
5075  if (value->is_tagged()) return false;
5076 
5077  // If its alternative conversion node is Smi, {value} will be converted to
5078  // a Smi when tagged.
5079  NodeInfo* node_info = GetOrCreateInfoFor(value);
5080  if (ValueNode* tagged_alt = node_info->alternative().tagged()) {
5081  DCHECK(tagged_alt->properties().is_conversion());
5082  return CheckType(tagged_alt, NodeType::kSmi);
5083  }
5084  return false;
5085 }
AllocationType allocation_type() const
Definition: maglev-ir.h:6464
void RecordUseReprHintIfPhi(ValueNode *node, UseRepresentation repr)

References v8::internal::maglev::AllocationBlock::allocation_type(), v8::internal::maglev::NodeInfo::alternative(), CheckType(), current_allocation_block_, v8::internal::DCHECK(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetAllocation(), GetOrCreateInfoFor(), GetType(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::kYoung, RecordUseReprHintIfPhi(), v8::internal::maglev::AllocationBlock::set_elided_write_barriers_depend_on_type(), and v8::internal::value.

Referenced by BuildStoreFixedArrayElement(), BuildStoreTaggedField(), and BuildStoreTaggedFieldNoWriteBarrier().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CanInlineCall()

bool v8::internal::maglev::MaglevGraphBuilder::CanInlineCall ( compiler::SharedFunctionInfoRef  shared,
float  call_frequency 
)
private

Definition at line 8437 of file maglev-graph-builder.cc.

8438  {
8439  if (graph()->total_inlined_bytecode_size() >
8442  TRACE_CANNOT_INLINE("maximum inlined bytecode size");
8443  return false;
8444  }
8445  // TODO(olivf): This is a temporary stopgap to prevent infinite recursion when
 8446  // inlining, because we currently exempt small functions from some of the
 8447  // negative heuristics. We should refactor these heuristics and make sure they
8448  // make sense in the presence of (mutually) recursive inlining. Please do
8449  // *not* return true before this check.
8450  if (inlining_depth() > v8_flags.max_maglev_hard_inline_depth) {
8451  TRACE_CANNOT_INLINE("inlining depth ("
8452  << inlining_depth() << ") >= hard-max-depth ("
8453  << v8_flags.max_maglev_hard_inline_depth << ")");
8454  return false;
8455  }
8456  if (compilation_unit_->shared_function_info().equals(shared)) {
8457  TRACE_CANNOT_INLINE("direct recursion");
8458  return false;
8459  }
8460  SharedFunctionInfo::Inlineability inlineability =
8461  shared.GetInlineability(CodeKind::MAGLEV, broker());
8462  if (inlineability != SharedFunctionInfo::Inlineability::kIsInlineable) {
8463  TRACE_CANNOT_INLINE(inlineability);
8464  return false;
8465  }
8466  // TODO(victorgomes): Support NewTarget/RegisterInput in inlined functions.
8467  compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8469  TRACE_CANNOT_INLINE("use unsupported NewTargetOrGenerator register");
8470  return false;
8471  }
8472  if (call_frequency < min_inlining_frequency()) {
8473  TRACE_CANNOT_INLINE("call frequency (" << call_frequency
8474  << ") < minimum threshold ("
8475  << min_inlining_frequency() << ")");
8476  return false;
8477  }
8479  TRACE_CANNOT_INLINE("big function, size ("
8480  << bytecode.length() << ") >= max-size ("
8481  << max_inlined_bytecode_size() << ")");
8482  return false;
8483  }
8484  return true;
8485 }
#define TRACE_CANNOT_INLINE(...)

References broker(), v8::internal::compiler::SharedFunctionInfoRef::GetBytecodeArray(), v8::internal::compiler::SharedFunctionInfoRef::GetInlineability(), graph(), v8::internal::compiler::BytecodeArrayRef::incoming_new_target_or_generator_register(), v8::internal::interpreter::Register::is_valid(), v8::internal::compiler::BytecodeArrayRef::length(), TRACE_CANNOT_INLINE, and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ CanSpeculateCall() [1/2]

bool v8::internal::maglev::MaglevGraphBuilder::CanSpeculateCall ( ) const
inlineprivate

◆ CanSpeculateCall() [2/2]

bool v8::internal::maglev::MaglevGraphBuilder::CanSpeculateCall ( std::initializer_list< SpeculationMode supported_modes) const
inlineprivate

Definition at line 3366 of file maglev-graph-builder.h.

3367  {
3368  return CanSpeculateCall() ||
3369  std::find(supported_modes.begin(), supported_modes.end(),
3370  current_speculation_mode_) != supported_modes.end();
3371  }

◆ CanTrackObjectChanges()

bool v8::internal::maglev::MaglevGraphBuilder::CanTrackObjectChanges ( ValueNode object,
TrackObjectMode  mode 
)
private

Definition at line 5146 of file maglev-graph-builder.cc.

5147  {
5148  DCHECK(!receiver->Is<VirtualObject>());
5149  if (!v8_flags.maglev_object_tracking) return false;
5150  if (!receiver->Is<InlinedAllocation>()) return false;
5151  InlinedAllocation* alloc = receiver->Cast<InlinedAllocation>();
5152  if (mode == TrackObjectMode::kStore) {
 5153  // If we have two objects A and B, such that A points to B (it contains B in
 5154  // one of its fields), we cannot change B without also changing A, even if
5155  // both can be elided. For now, we escape both objects instead.
5156  if (graph_->allocations_elide_map().find(alloc) !=
5157  graph_->allocations_elide_map().end()) {
5158  return false;
5159  }
5160  if (alloc->IsEscaping()) return false;
5161  // Ensure object is escaped if we are within a try-catch block. This is
5162  // crucial because a deoptimization point inside the catch handler could
5163  // re-materialize objects differently, depending on whether the throw
5164  // occurred before or after this store. We could potentially relax this
5165  // requirement by verifying that no throwable nodes have been emitted since
5166  // the try-block started, but for now, err on the side of caution and
5167  // always escape.
5168  if (IsInsideTryBlock()) return false;
5169  } else {
5171  if (IsEscaping(graph_, alloc)) return false;
5172  }
5173  // We don't support loop phis inside VirtualObjects, so any access inside a
5174  // loop should escape the object, except for objects that were created since
5175  // the last loop header.
5176  if (IsInsideLoop()) {
5177  if (!is_loop_effect_tracking() ||
5178  !loop_effects_->allocations.contains(alloc)) {
5179  return false;
5180  }
5181  }
5182  // Iterate all live objects to be sure that the allocation is not escaping.
5183  SLOW_DCHECK(
5185  return true;
5186 }
#define SLOW_DCHECK(condition)
Definition: checks.h:21
bool VerifyIsNotEscaping(VirtualObjectList vos, InlinedAllocation *alloc)

References v8::internal::maglev::LoopEffects::allocations, v8::internal::maglev::Graph::allocations_elide_map(), v8::internal::maglev::NodeBase::Cast(), current_interpreter_frame_, v8::internal::DCHECK(), DCHECK_EQ, graph_, v8::internal::maglev::NodeBase::Is(), is_loop_effect_tracking(), v8::internal::maglev::InlinedAllocation::IsEscaping(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::IsEscaping(), IsInsideLoop(), IsInsideTryBlock(), kLoad, kStore, loop_effects_, mode(), SLOW_DCHECK, v8::internal::v8_flags, v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::VerifyIsNotEscaping(), and v8::internal::maglev::InterpreterFrameState::virtual_objects().

Referenced by BuildLoadFixedArrayElement(), BuildLoadFixedDoubleArrayElement(), and TryBuildStoreTaggedFieldToAllocation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CanTreatHoleAsUndefined()

bool v8::internal::maglev::MaglevGraphBuilder::CanTreatHoleAsUndefined ( base::Vector< const compiler::MapRef > const &  receiver_maps)
private

Definition at line 5397 of file maglev-graph-builder.cc.

5398  {
5399  // Check if all {receiver_maps} have one of the initial Array.prototype
5400  // or Object.prototype objects as their prototype (in any of the current
5401  // native contexts, as the global Array protector works isolate-wide).
5402  for (compiler::MapRef receiver_map : receiver_maps) {
5403  compiler::ObjectRef receiver_prototype = receiver_map.prototype(broker());
5404  if (!receiver_prototype.IsJSObject() ||
5405  !broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
5406  return false;
5407  }
5408  }
5409 
5410  // Check if the array prototype chain is intact.
5412 }

References broker(), v8::internal::compiler::JSHeapBroker::dependencies(), and v8::internal::compiler::CompilationDependencies::DependOnNoElementsProtector().

+ Here is the call graph for this function:

◆ CheckContextExtensions()

bool v8::internal::maglev::MaglevGraphBuilder::CheckContextExtensions ( size_t  depth)
private

Definition at line 4363 of file maglev-graph-builder.cc.

4363  {
4364  compiler::OptionalScopeInfoRef maybe_scope_info =
4366  if (!maybe_scope_info.has_value()) return false;
4367  compiler::ScopeInfoRef scope_info = maybe_scope_info.value();
4368  for (uint32_t d = 0; d < depth; d++) {
4369  CHECK_NE(scope_info.scope_type(), ScopeType::SCRIPT_SCOPE);
4370  CHECK_NE(scope_info.scope_type(), ScopeType::REPL_MODE_SCOPE);
4371  if (scope_info.HasContextExtensionSlot() &&
4372  !broker()->dependencies()->DependOnEmptyContextExtension(scope_info)) {
4373  // Using EmptyContextExtension dependency is not possible for this
4374  // scope_info, so generate dynamic checks.
4375  ValueNode* context = GetContextAtDepth(GetContext(), d);
4376  // Only support known contexts so that we can check that there's no
4377  // extension at compile time. Otherwise we could end up in a deopt loop
4378  // once we do get an extension.
4379  compiler::OptionalHeapObjectRef maybe_ref = TryGetConstant(context);
4380  if (!maybe_ref) return false;
4381  compiler::ContextRef context_ref = maybe_ref.value().AsContext();
4382  compiler::OptionalObjectRef extension_ref =
4383  context_ref.get(broker(), Context::EXTENSION_INDEX);
4384  // The extension may be concurrently installed while we're checking the
4385  // context, in which case it may still be uninitialized. This still
4386  // means an extension is about to appear, so we should block this
4387  // optimization.
4388  if (!extension_ref) return false;
4389  if (!extension_ref->IsUndefined()) return false;
4390  ValueNode* extension =
4393  AddNewNode<CheckValue>({extension}, broker()->undefined_value(),
4394  DeoptimizeReason::kUnexpectedContextExtension);
4395  }
4396  CHECK_IMPLIES(!scope_info.HasOuterScopeInfo(), d + 1 == depth);
4397  if (scope_info.HasOuterScopeInfo()) {
4398  scope_info = scope_info.OuterScopeInfo(broker());
4399  }
4400  }
4401  return true;
4402 }
compiler::OptionalScopeInfoRef TryGetScopeInfo(ValueNode *context)
Definition: maglev-graph.h:368
@ REPL_MODE_SCOPE
Definition: globals.h:1910

References broker(), CHECK_IMPLIES, CHECK_NE, v8::internal::Context::EXTENSION_INDEX, v8::internal::compiler::ContextRef::get(), GetContext(), GetContextAtDepth(), graph(), v8::internal::compiler::ScopeInfoRef::HasContextExtensionSlot(), v8::internal::compiler::ScopeInfoRef::HasOuterScopeInfo(), kMutable, v8::internal::kNoContextCells, LoadAndCacheContextSlot(), v8::internal::compiler::ScopeInfoRef::OuterScopeInfo(), v8::internal::REPL_MODE_SCOPE, v8::internal::compiler::ScopeInfoRef::scope_type(), v8::internal::SCRIPT_SCOPE, TryGetConstant(), and v8::internal::maglev::Graph::TryGetScopeInfo().

+ Here is the call graph for this function:

◆ CheckType()

bool v8::internal::maglev::MaglevGraphBuilder::CheckType ( ValueNode node,
NodeType  type,
NodeType old = nullptr 
)
inlineprivate

Definition at line 528 of file maglev-graph-builder.h.

528  {
529  return known_node_aspects().CheckType(broker(), node, type, old);
530  }
bool CheckType(compiler::JSHeapBroker *broker, ValueNode *node, NodeType type, NodeType *current_type)

References broker(), v8::internal::maglev::KnownNodeAspects::CheckType(), known_node_aspects(), and v8::internal::tracing::type.

Referenced by BuildTestUndetectable(), CanElideWriteBarrier(), GetInt32(), GetInt32ElementIndex(), GetInternalizedString(), TryReduceCompareEqualAgainstConstant(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CheckTypes()

NodeType v8::internal::maglev::MaglevGraphBuilder::CheckTypes ( ValueNode node,
std::initializer_list< NodeType types 
)
inlineprivate

Definition at line 531 of file maglev-graph-builder.h.

531  {
532  return known_node_aspects().CheckTypes(broker(), node, types);
533  }
NodeType CheckTypes(compiler::JSHeapBroker *broker, ValueNode *node, std::initializer_list< NodeType > types)

References broker(), v8::internal::maglev::KnownNodeAspects::CheckTypes(), and known_node_aspects().

Referenced by TryReduceTypeOf().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ClearCurrentAllocationBlock()

void v8::internal::maglev::MaglevGraphBuilder::ClearCurrentAllocationBlock ( )
private

Definition at line 13662 of file maglev-graph-builder.cc.

13662  {
13663  current_allocation_block_ = nullptr;
13664 }

◆ ClobberAccumulator()

void v8::internal::maglev::MaglevGraphBuilder::ClobberAccumulator ( )
inlineprivate

Definition at line 1725 of file maglev-graph-builder.h.

1725  {
1729  graph()->GetRootConstant(RootIndex::kOptimizedOut));
1730  }
static bool ClobbersAccumulator(Bytecode bytecode)
Definition: bytecodes.h:709

References DCHECK, and graph().

+ Here is the call graph for this function:

◆ compilation_unit()

◆ ContextMayAlias()

bool v8::internal::maglev::MaglevGraphBuilder::ContextMayAlias ( ValueNode context,
compiler::OptionalScopeInfoRef  scope_info 
)
private

Definition at line 3601 of file maglev-graph-builder.cc.

3602  {
3603  // Distinguishing contexts by their scope info only works if scope infos are
3604  // guaranteed to be unique.
3605  // TODO(crbug.com/401059828): reenable when crashes are gone.
3606  if ((true) || !v8_flags.reuse_scope_infos) return true;
3607  if (!scope_info.has_value()) {
3608  return true;
3609  }
3610  auto other = graph()->TryGetScopeInfo(context);
3611  if (!other.has_value()) {
3612  return true;
3613  }
3614  return scope_info->equals(*other);
3615 }

References graph(), v8::internal::maglev::Graph::TryGetScopeInfo(), and v8::internal::v8_flags.

Referenced by StoreAndCacheContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ConvertForStoring()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::ConvertForStoring ( ValueNode node,
ElementsKind  kind 
)
private

Definition at line 6607 of file maglev-graph-builder.cc.

6608  {
6609  if (IsDoubleElementsKind(kind)) {
6610 #ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
6613  const bool convert_hole_to_undefined = true;
6614  return GetHoleyFloat64(value, convert_hole_to_undefined);
6615  }
6616 #endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
6617  // Make sure we do not store signalling NaNs into double arrays.
6618  // TODO(leszeks): Consider making this a bit on StoreFixedDoubleArrayElement
6619  // rather than a separate node.
6620  return GetSilencedNaN(GetFloat64(value));
6621  }
6622  if (IsSmiElementsKind(kind)) return GetSmiValue(value);
6623  return value;
6624 }
ValueNode * GetHoleyFloat64(ValueNode *value, bool convert_hole_to_undefined)
ValueNode * GetSilencedNaN(ValueNode *value)
ReduceResult GetSmiValue(ValueNode *value, UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
constexpr bool IsSmiElementsKind(ElementsKind kind)
DONT_OVERRIDE DISABLE_ALLOCATION_SITES DISABLE_ALLOCATION_SITES HOLEY_DOUBLE_ELEMENTS
constexpr bool IsDoubleElementsKind(ElementsKind kind)

References v8::debug::anonymous_namespace{debug-interface.cc}::GetSmiValue(), v8::internal::HOLEY_DOUBLE_ELEMENTS, v8::internal::IsDoubleElementsKind(), v8::internal::IsSmiElementsKind(), and v8::internal::value.

+ Here is the call graph for this function:

◆ ConvertInputTo()

template<UseReprHintRecording hint = UseReprHintRecording::kRecord>
ValueNode* v8::internal::maglev::MaglevGraphBuilder::ConvertInputTo ( ValueNode input,
ValueRepresentation  expected 
)
inlineprivate

Definition at line 1921 of file maglev-graph-builder.h.

1921  {
1922  ValueRepresentation repr = input->properties().value_representation();
1923  if (repr == expected) return input;
1924  switch (expected) {
1926  return GetTaggedValue(input, hint);
1928  return GetInt32(input);
1931  return GetFloat64(input);
1934  // These conversion should be explicitly done beforehand.
1935  UNREACHABLE();
1936  }
1937  }
ValueNode * GetInt32(ValueNode *value, bool can_be_heap_number=false)

References v8::internal::anonymous_namespace{ic.cc}::kIntPtr, v8::internal::maglev::NodeBase::properties(), UNREACHABLE, and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ CreateArgumentsObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateArgumentsObject ( compiler::MapRef  map,
ValueNode length,
ValueNode elements,
std::optional< ValueNode * >  callee = {} 
)
private

Definition at line 13523 of file maglev-graph-builder.cc.

13525  {
13526  DCHECK_EQ(JSSloppyArgumentsObject::kLengthOffset, JSArray::kLengthOffset);
13527  DCHECK_EQ(JSStrictArgumentsObject::kLengthOffset, JSArray::kLengthOffset);
13528  int slot_count = map.instance_size() / kTaggedSize;
13529  SBXCHECK_EQ(slot_count, callee.has_value() ? 5 : 4);
13530  VirtualObject* arguments = CreateVirtualObject(map, slot_count);
13531  arguments->set(JSArray::kPropertiesOrHashOffset,
13532  GetRootConstant(RootIndex::kEmptyFixedArray));
13533  arguments->set(JSArray::kElementsOffset, elements);
13534  CHECK(length->Is<Int32Constant>() || length->Is<ArgumentsLength>() ||
13535  length->Is<RestLength>());
13536  arguments->set(JSArray::kLengthOffset, length);
13537  if (callee.has_value()) {
13538  arguments->set(JSSloppyArgumentsObject::kCalleeOffset, callee.value());
13539  }
13540  DCHECK(arguments->map().IsJSArgumentsObjectMap() ||
13541  arguments->map().IsJSArrayMap());
13542  return arguments;
13543 }
#define SBXCHECK_EQ(lhs, rhs)
Definition: check.h:55
VirtualObject * CreateVirtualObject(compiler::MapRef map, uint32_t slot_count_including_map)

References CHECK, v8::internal::DCHECK(), DCHECK_EQ, v8::internal::compiler::MapRef::instance_size(), v8::internal::kTaggedSize, v8::internal::length, v8::internal::maglev::VirtualObject::map(), SBXCHECK_EQ, and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ CreateBranchBuilder() [1/2]

BranchBuilder v8::internal::maglev::MaglevGraphBuilder::CreateBranchBuilder ( BranchType  jump_type = BranchType::kBranchIfTrue)
inlineprivate

Definition at line 2972 of file maglev-graph-builder.h.

2973  {
2974  return BranchBuilder(this, jump_type);
2975  }

◆ CreateBranchBuilder() [2/2]

BranchBuilder v8::internal::maglev::MaglevGraphBuilder::CreateBranchBuilder ( MaglevSubGraphBuilder subgraph,
MaglevSubGraphBuilder::Label jump_label,
BranchType  jump_type = BranchType::kBranchIfTrue 
)
inlineprivate

Definition at line 2976 of file maglev-graph-builder.h.

2978  {
2979  return BranchBuilder(this, subgraph, jump_type, jump_label);
2980  }

◆ CreateConsString()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateConsString ( ValueNode map,
ValueNode length,
ValueNode first,
ValueNode second 
)
private

Definition at line 13412 of file maglev-graph-builder.cc.

13415  {
13416  return NodeBase::New<VirtualObject>(
13417  zone(), 0, NewObjectId(),
13418  VirtualObject::VirtualConsString{map, length, {first, second}});
13419 }

References v8::internal::length.

Referenced by TryBuildNewConsString().

+ Here is the caller graph for this function:

◆ CreateContext()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateContext ( compiler::MapRef  map,
int  length,
compiler::ScopeInfoRef  scope_info,
ValueNode previous_context,
std::optional< ValueNode * >  extension = {} 
)
private

Definition at line 13499 of file maglev-graph-builder.cc.

13501  {
13502  int slot_count = FixedArray::SizeFor(length) / kTaggedSize;
13503  VirtualObject* context = CreateVirtualObject(map, slot_count);
13504  context->set(Context::kLengthOffset, GetInt32Constant(length));
13506  GetConstant(scope_info));
13508  previous_context);
13509  int index = Context::PREVIOUS_INDEX + 1;
13510  if (extension.has_value()) {
13512  extension.value());
13513  index++;
13514  }
13515  for (; index < length; index++) {
13516  context->set(Context::OffsetOfElementAt(index),
13517  GetRootConstant(RootIndex::kUndefinedValue));
13518  }
13519  EnsureType(context, NodeType::kContext);
13520  return context;
13521 }
static constexpr int OffsetOfElementAt(int index)
Definition: contexts.h:524

References v8::internal::Context::EXTENSION_INDEX, v8::internal::index, v8::internal::kTaggedSize, v8::internal::length, v8::internal::Context::OffsetOfElementAt(), v8::internal::Context::PREVIOUS_INDEX, v8::internal::Context::SCOPE_INFO_INDEX, v8::internal::maglev::VirtualObject::set(), and v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::SizeFor().

+ Here is the call graph for this function:

◆ CreateDoubleFixedArray()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateDoubleFixedArray ( uint32_t  elements_length,
compiler::FixedDoubleArrayRef  elements 
)
private

Definition at line 13403 of file maglev-graph-builder.cc.

13404  {
13405  // VirtualObjects are not added to the Maglev graph.
13406  VirtualObject* vobject = NodeBase::New<VirtualObject>(
13407  zone(), 0, broker()->fixed_double_array_map(), NewObjectId(),
13408  elements_length, elements);
13409  return vobject;
13410 }

References broker().

+ Here is the call graph for this function:

◆ CreateEdgeSplitBlock()

BasicBlock* v8::internal::maglev::MaglevGraphBuilder::CreateEdgeSplitBlock ( BasicBlockRef jump_targets,
BasicBlock predecessor 
)
inlineprivate

Definition at line 592 of file maglev-graph-builder.h.

593  {
594  if (v8_flags.trace_maglev_graph_building) {
595  std::cout << "== New empty block ==" << std::endl;
597  }
599  current_block_ = zone()->New<BasicBlock>(nullptr, zone());
600  BasicBlock* result = FinishBlock<Jump>({}, &jump_targets);
601  result->set_edge_split_block(predecessor);
602 #ifdef DEBUG
603  new_nodes_.clear();
604 #endif
605  return result;
606  }

References current_block_, DCHECK_NULL, v8::internal::Zone::New(), PrintVirtualObjects(), v8::base::internal::result, v8::internal::v8_flags, and zone().

Referenced by ProcessMergePointPredecessors().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ CreateFixedArray()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateFixedArray ( compiler::MapRef  map,
int  length 
)
private

Definition at line 13489 of file maglev-graph-builder.cc.

13490  {
13491  int slot_count = FixedArray::SizeFor(length) / kTaggedSize;
13492  VirtualObject* array = CreateVirtualObject(map, slot_count);
13493  array->set(offsetof(FixedArray, length_), GetInt32Constant(length));
13494  array->ClearSlots(offsetof(FixedArray, length_),
13495  GetRootConstant(RootIndex::kOnePointerFillerMap));
13496  return array;
13497 }

References v8::internal::maglev::VirtualObject::ClearSlots(), v8::internal::kTaggedSize, v8::internal::length, v8::internal::maglev::VirtualObject::set(), and v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::SizeFor().

+ Here is the call graph for this function:

◆ CreateHeapNumber()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateHeapNumber ( Float64  value)
private

Definition at line 13396 of file maglev-graph-builder.cc.

13396  {
13397  // VirtualObjects are not added to the Maglev graph.
13398  VirtualObject* vobject = NodeBase::New<VirtualObject>(
13399  zone(), 0, broker()->heap_number_map(), NewObjectId(), value);
13400  return vobject;
13401 }

References broker(), and v8::internal::value.

+ Here is the call graph for this function:

◆ CreateJSArray()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::CreateJSArray ( compiler::MapRef  map,
int  instance_size,
ValueNode length 
)
private

Definition at line 13436 of file maglev-graph-builder.cc.

13438  {
13439  int slot_count = instance_size / kTaggedSize;
13440  SBXCHECK_GE(slot_count, 4);
13441  VirtualObject* object = CreateVirtualObject(map, slot_count);
13442  object->set(JSArray::kPropertiesOrHashOffset,
13443  GetRootConstant(RootIndex::kEmptyFixedArray));
13444  // Either the value is a Smi already, or we force a conversion to Smi and
13445  // cache the value in its alternative representation node.
13447  object->set(JSArray::kElementsOffset,
13448  GetRootConstant(RootIndex::kEmptyFixedArray));
13449  object->set(JSArray::kLengthOffset, length);
13450  object->ClearSlots(JSArray::kLengthOffset,
13451  GetRootConstant(RootIndex::kOnePointerFillerMap));
13452  return object;
13453 }

References v8::debug::anonymous_namespace{debug-interface.cc}::GetSmiValue(), v8::internal::kTaggedSize, v8::internal::length, RETURN_IF_ABORT, and SBXCHECK_GE.

+ Here is the call graph for this function:

◆ CreateJSArrayIterator()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSArrayIterator ( compiler::MapRef  map,
ValueNode iterated_object,
IterationKind  kind 
)
private

Definition at line 13455 of file maglev-graph-builder.cc.

13456  {
13457  int slot_count = map.instance_size() / kTaggedSize;
13458  SBXCHECK_EQ(slot_count, 6);
13459  VirtualObject* object = CreateVirtualObject(map, slot_count);
13460  object->set(JSArrayIterator::kPropertiesOrHashOffset,
13461  GetRootConstant(RootIndex::kEmptyFixedArray));
13462  object->set(JSArrayIterator::kElementsOffset,
13463  GetRootConstant(RootIndex::kEmptyFixedArray));
13464  object->set(JSArrayIterator::kIteratedObjectOffset, iterated_object);
13465  object->set(JSArrayIterator::kNextIndexOffset, GetInt32Constant(0));
13466  object->set(JSArrayIterator::kKindOffset,
13467  GetInt32Constant(static_cast<int>(kind)));
13468  return object;
13469 }

References v8::internal::compiler::MapRef::instance_size(), v8::internal::kTaggedSize, and SBXCHECK_EQ.

+ Here is the call graph for this function:

◆ CreateJSConstructor()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSConstructor ( compiler::JSFunctionRef  constructor)
private

Definition at line 13471 of file maglev-graph-builder.cc.

13472  {
13473  compiler::SlackTrackingPrediction prediction =
13475  constructor);
13476  int slot_count = prediction.instance_size() / kTaggedSize;
13477  VirtualObject* object =
13478  CreateVirtualObject(constructor.initial_map(broker()), slot_count);
13479  SBXCHECK_GE(slot_count, 3);
13480  object->set(JSObject::kPropertiesOrHashOffset,
13481  GetRootConstant(RootIndex::kEmptyFixedArray));
13482  object->set(JSObject::kElementsOffset,
13483  GetRootConstant(RootIndex::kEmptyFixedArray));
13484  object->ClearSlots(JSObject::kElementsOffset,
13485  GetRootConstant(RootIndex::kOnePointerFillerMap));
13486  return object;
13487 }
SlackTrackingPrediction DependOnInitialMapInstanceSizePrediction(JSFunctionRef function)

References broker(), v8::internal::compiler::JSFunctionRef::initial_map(), v8::internal::compiler::SlackTrackingPrediction::instance_size(), v8::internal::kTaggedSize, and SBXCHECK_GE.

+ Here is the call graph for this function:

◆ CreateJSGeneratorObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSGeneratorObject ( compiler::MapRef  map,
int  instance_size,
ValueNode context,
ValueNode closure,
ValueNode receiver,
ValueNode register_file 
)
private

Definition at line 13577 of file maglev-graph-builder.cc.

13579  {
13580  int slot_count = instance_size / kTaggedSize;
13581  InstanceType instance_type = map.instance_type();
13582  DCHECK(instance_type == JS_GENERATOR_OBJECT_TYPE ||
13583  instance_type == JS_ASYNC_GENERATOR_OBJECT_TYPE);
13584  SBXCHECK_GE(slot_count, instance_type == JS_GENERATOR_OBJECT_TYPE ? 10 : 12);
13585  VirtualObject* object = CreateVirtualObject(map, slot_count);
13586  object->set(JSGeneratorObject::kPropertiesOrHashOffset,
13587  GetRootConstant(RootIndex::kEmptyFixedArray));
13588  object->set(JSGeneratorObject::kElementsOffset,
13589  GetRootConstant(RootIndex::kEmptyFixedArray));
13590  object->set(JSGeneratorObject::kContextOffset, context);
13591  object->set(JSGeneratorObject::kFunctionOffset, closure);
13592  object->set(JSGeneratorObject::kReceiverOffset, receiver);
13593  object->set(JSGeneratorObject::kInputOrDebugPosOffset,
13594  GetRootConstant(RootIndex::kUndefinedValue));
13595  object->set(JSGeneratorObject::kResumeModeOffset,
13597  object->set(JSGeneratorObject::kContinuationOffset,
13599  object->set(JSGeneratorObject::kParametersAndRegistersOffset, register_file);
13600  if (instance_type == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
13601  object->set(JSAsyncGeneratorObject::kQueueOffset,
13602  GetRootConstant(RootIndex::kUndefinedValue));
13603  object->set(JSAsyncGeneratorObject::kIsAwaitingOffset, GetInt32Constant(0));
13604  }
13605  return object;
13606 }
static const int kGeneratorExecuting
Definition: js-generator.h:41

References v8::internal::DCHECK(), v8::internal::compiler::MapRef::instance_type(), v8::internal::JSGeneratorObject::kGeneratorExecuting, v8::internal::JSGeneratorObject::kNext, v8::internal::kTaggedSize, and SBXCHECK_GE.

+ Here is the call graph for this function:

◆ CreateJSIteratorResult()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSIteratorResult ( compiler::MapRef  map,
ValueNode value,
ValueNode done 
)
private

Definition at line 13608 of file maglev-graph-builder.cc.

13610  {
13611  static_assert(JSIteratorResult::kSize == 5 * kTaggedSize);
13612  int slot_count = JSIteratorResult::kSize / kTaggedSize;
13613  VirtualObject* iter_result = CreateVirtualObject(map, slot_count);
13614  iter_result->set(JSIteratorResult::kPropertiesOrHashOffset,
13615  GetRootConstant(RootIndex::kEmptyFixedArray));
13616  iter_result->set(JSIteratorResult::kElementsOffset,
13617  GetRootConstant(RootIndex::kEmptyFixedArray));
13618  iter_result->set(JSIteratorResult::kValueOffset, value);
13619  iter_result->set(JSIteratorResult::kDoneOffset, done);
13620  return iter_result;
13621 }

References v8::internal::kTaggedSize, v8::internal::maglev::VirtualObject::set(), and v8::internal::value.

+ Here is the call graph for this function:

◆ CreateJSObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSObject ( compiler::MapRef  map)
private

Definition at line 13421 of file maglev-graph-builder.cc.

13421  {
13422  DCHECK(!map.is_dictionary_map());
13423  DCHECK(!map.IsInobjectSlackTrackingInProgress());
13424  int slot_count = map.instance_size() / kTaggedSize;
13425  SBXCHECK_GE(slot_count, 3);
13426  VirtualObject* object = CreateVirtualObject(map, slot_count);
13427  object->set(JSObject::kPropertiesOrHashOffset,
13428  GetRootConstant(RootIndex::kEmptyFixedArray));
13429  object->set(JSObject::kElementsOffset,
13430  GetRootConstant(RootIndex::kEmptyFixedArray));
13431  object->ClearSlots(JSObject::kElementsOffset,
13432  GetRootConstant(RootIndex::kOnePointerFillerMap));
13433  return object;
13434 }

References v8::internal::DCHECK(), v8::internal::compiler::MapRef::instance_size(), v8::internal::compiler::MapRef::is_dictionary_map(), v8::internal::compiler::MapRef::IsInobjectSlackTrackingInProgress(), v8::internal::kTaggedSize, and SBXCHECK_GE.

+ Here is the call graph for this function:

◆ CreateJSStringIterator()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateJSStringIterator ( compiler::MapRef  map,
ValueNode string 
)
private

Definition at line 13623 of file maglev-graph-builder.cc.

13624  {
13625  static_assert(JSStringIterator::kHeaderSize == 5 * kTaggedSize);
13626  int slot_count = JSStringIterator::kHeaderSize / kTaggedSize;
13627  VirtualObject* string_iter = CreateVirtualObject(map, slot_count);
13628  string_iter->set(JSStringIterator::kPropertiesOrHashOffset,
13629  GetRootConstant(RootIndex::kEmptyFixedArray));
13630  string_iter->set(JSStringIterator::kElementsOffset,
13631  GetRootConstant(RootIndex::kEmptyFixedArray));
13632  string_iter->set(JSStringIterator::kStringOffset, string);
13633  string_iter->set(JSStringIterator::kIndexOffset, GetInt32Constant(0));
13634  return string_iter;
13635 }

References v8::internal::kTaggedSize, and v8::internal::maglev::VirtualObject::set().

+ Here is the call graph for this function:

◆ CreateMappedArgumentsElements()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateMappedArgumentsElements ( compiler::MapRef  map,
int  mapped_count,
ValueNode context,
ValueNode unmapped_elements 
)
private

Definition at line 13545 of file maglev-graph-builder.cc.

13547  {
13548  int slot_count = SloppyArgumentsElements::SizeFor(mapped_count) / kTaggedSize;
13549  VirtualObject* elements = CreateVirtualObject(map, slot_count);
13550  elements->set(offsetof(SloppyArgumentsElements, length_),
13551  GetInt32Constant(mapped_count));
13552  elements->set(offsetof(SloppyArgumentsElements, context_), context);
13553  elements->set(offsetof(SloppyArgumentsElements, arguments_),
13554  unmapped_elements);
13555  return elements;
13556 }

References v8::internal::kTaggedSize, v8::internal::maglev::VirtualObject::set(), and v8::internal::TaggedArrayBase< SloppyArgumentsElements, SloppyArgumentsElementsShape >::SizeFor().

+ Here is the call graph for this function:

◆ CreateNewConstantNode()

template<typename NodeT , typename... Args>
NodeT* v8::internal::maglev::MaglevGraphBuilder::CreateNewConstantNode ( Args &&...  args) const
inlineprivate

Definition at line 1124 of file maglev-graph-builder.h.

1124  {
1125  static_assert(IsConstantNode(Node::opcode_of<NodeT>));
1126  NodeT* node = NodeBase::New<NodeT>(zone(), std::forward<Args>(args)...);
1127  static_assert(!NodeT::kProperties.can_eager_deopt());
1128  static_assert(!NodeT::kProperties.can_lazy_deopt());
1129  static_assert(!NodeT::kProperties.can_throw());
1130  static_assert(!NodeT::kProperties.can_write());
1132  if (v8_flags.trace_maglev_graph_building) {
1133  std::cout << " " << node << " "
1134  << PrintNodeLabel(graph_labeller(), node) << ": "
1135  << PrintNode(graph_labeller(), node) << std::endl;
1136  }
1137  return node;
1138  }

References v8::base::args, graph_labeller(), has_graph_labeller(), v8::internal::maglev::IsConstantNode(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::maglev::MaglevGraphLabeller::RegisterNode(), v8::internal::v8_flags, and zone().

+ Here is the call graph for this function:

◆ CreateRegExpLiteralObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateRegExpLiteralObject ( compiler::MapRef  map,
compiler::RegExpBoilerplateDescriptionRef  literal 
)
private

Definition at line 13558 of file maglev-graph-builder.cc.

13559  {
13561  int slot_count = JSRegExp::Size() / kTaggedSize;
13562  VirtualObject* regexp = CreateVirtualObject(map, slot_count);
13563  regexp->set(JSRegExp::kPropertiesOrHashOffset,
13564  GetRootConstant(RootIndex::kEmptyFixedArray));
13565  regexp->set(JSRegExp::kElementsOffset,
13566  GetRootConstant(RootIndex::kEmptyFixedArray));
13567  regexp->set(JSRegExp::kDataOffset,
13568  GetTrustedConstant(literal.data(broker()),
13569  kRegExpDataIndirectPointerTag));
13570  regexp->set(JSRegExp::kSourceOffset, GetConstant(literal.source(broker())));
13571  regexp->set(JSRegExp::kFlagsOffset, GetInt32Constant(literal.flags()));
13572  regexp->set(JSRegExp::kLastIndexOffset,
13574  return regexp;
13575 }
static constexpr int Size()
Definition: js-regexp.h:120
static constexpr int kLastIndexOffset
Definition: js-regexp.h:109
static constexpr int kInitialLastIndexValue
Definition: js-regexp.h:112
ValueNode * GetTrustedConstant(compiler::HeapObjectRef ref, IndirectPointerTag tag)

References broker(), v8::internal::compiler::RegExpBoilerplateDescriptionRef::data(), DCHECK_EQ, v8::internal::compiler::RegExpBoilerplateDescriptionRef::flags(), v8::internal::JSRegExp::kInitialLastIndexValue, v8::internal::JSRegExp::kLastIndexOffset, v8::internal::kTaggedSize, v8::internal::maglev::VirtualObject::set(), v8::internal::JSRegExp::Size(), and v8::internal::compiler::RegExpBoilerplateDescriptionRef::source().

+ Here is the call graph for this function:

◆ CreateVirtualObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::CreateVirtualObject ( compiler::MapRef  map,
uint32_t  slot_count_including_map 
)
private

Definition at line 13383 of file maglev-graph-builder.cc.

13384  {
13385  // VirtualObjects are not added to the Maglev graph.
13386  DCHECK_GT(slot_count_including_map, 0);
13387  uint32_t slot_count = slot_count_including_map - 1;
13388  ValueNode** slots = zone()->AllocateArray<ValueNode*>(slot_count);
13389  VirtualObject* vobject = NodeBase::New<VirtualObject>(
13390  zone(), 0, map, NewObjectId(), slot_count, slots);
13391  std::fill_n(slots, slot_count,
13392  GetRootConstant(RootIndex::kOnePointerFillerMap));
13393  return vobject;
13394 }

References DCHECK_GT.

◆ current_deopt_scope()

const DeoptFrameScope* v8::internal::maglev::MaglevGraphBuilder::current_deopt_scope ( ) const
inline

Definition at line 363 of file maglev-graph-builder.h.

363  {
364  return current_deopt_scope_;
365  }

References current_deopt_scope_.

◆ current_interpreter_frame()

const InterpreterFrameState& v8::internal::maglev::MaglevGraphBuilder::current_interpreter_frame ( ) const
inline

Definition at line 359 of file maglev-graph-builder.h.

359  {
361  }

References current_interpreter_frame_.

Referenced by v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::MaglevSubGraphBuilder(), and v8::internal::maglev::MergePointInterpreterFrameState::MergeThrow().

+ Here is the caller graph for this function:

◆ DecrementDeadPredecessorAndAccountForPeeling()

void v8::internal::maglev::MaglevGraphBuilder::DecrementDeadPredecessorAndAccountForPeeling ( uint32_t  offset)
inlineprivate

Definition at line 3184 of file maglev-graph-builder.h.

3184  {
3185  DCHECK_LE(offset, bytecode().length());
3186  DCHECK_GT(predecessor_count_[offset], 0);
3187  DCHECK_IMPLIES(merge_states_[offset],
3188  merge_states_[offset]->predecessor_count() ==
3189  predecessor_count_[offset] - 1);
3190  predecessor_count_[offset]--;
3191  if (in_peeled_iteration()) {
3193  } else {
3195  }
3196  }

References DCHECK, DCHECK_GT, DCHECK_IMPLIES, DCHECK_LE, and v8::internal::length.

◆ DeepCopyVirtualObject()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::DeepCopyVirtualObject ( VirtualObject vobj)
private

Definition at line 13375 of file maglev-graph-builder.cc.

13375  {
13376  CHECK_EQ(old->type(), VirtualObject::kDefault);
13377  VirtualObject* vobject = old->Clone(NewObjectId(), zone());
13379  old->allocation()->UpdateObject(vobject);
13380  return vobject;
13381 }

References v8::internal::maglev::VirtualObject::allocation(), CHECK_EQ, v8::internal::maglev::VirtualObject::Clone(), v8::internal::maglev::VirtualObject::type(), and v8::internal::maglev::InlinedAllocation::UpdateObject().

Referenced by GetModifiableObjectFromAllocation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ DoTryReduceMathRound()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::DoTryReduceMathRound ( CallArguments args,
Float64Round::Kind  kind 
)
private

Definition at line 10697 of file maglev-graph-builder.cc.

10698  {
10699  if (args.count() == 0) {
10700  return GetRootConstant(RootIndex::kNanValue);
10701  }
10702  ValueNode* arg = args[0];
10703  auto arg_repr = arg->value_representation();
10704  if (arg_repr == ValueRepresentation::kInt32 ||
10705  arg_repr == ValueRepresentation::kUint32 ||
10706  arg_repr == ValueRepresentation::kIntPtr) {
10707  return arg;
10708  }
10709  if (CheckType(arg, NodeType::kSmi)) return arg;
10710  if (!IsSupported(CpuOperation::kFloat64Round)) {
10711  return {};
10712  }
10713  if (arg_repr == ValueRepresentation::kFloat64 ||
10714  arg_repr == ValueRepresentation::kHoleyFloat64) {
10715  return AddNewNode<Float64Round>({arg}, kind);
10716  }
10718  if (CheckType(arg, NodeType::kNumberOrOddball)) {
10719  return AddNewNode<Float64Round>(
10721  arg, NodeType::kNumberOrOddball,
10723  kind);
10724  }
10725  if (!CanSpeculateCall()) return {};
10726  DeoptFrameScope continuation_scope(this, Float64Round::continuation(kind));
10727  ToNumberOrNumeric* conversion =
10728  AddNewNode<ToNumberOrNumeric>({arg}, Object::Conversion::kToNumber);
10729  ValueNode* float64_value = AddNewNode<UncheckedNumberOrOddballToFloat64>(
10731  return AddNewNode<Float64Round>({float64_value}, kind);
10732 }
static Builtin continuation(Kind kind)
Definition: maglev-ir.h:4512
ValueNode * GetHoleyFloat64ForToNumber(ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References v8::base::args, DCHECK_EQ, v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::IsSupported(), v8::internal::anonymous_namespace{ic.cc}::kIntPtr, v8::internal::compiler::kSmi, v8::internal::Object::kToNumber, and v8::internal::maglev::ValueNode::value_representation().

+ Here is the call graph for this function:

◆ EmitUnconditionalDeopt()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::EmitUnconditionalDeopt ( DeoptimizeReason  reason)
inlineprivate

Definition at line 728 of file maglev-graph-builder.h.

728  {
730  // Create a block rather than calling finish, since we don't yet know the
731  // next block's offset before the loop skipping the rest of the bytecodes.
732  FinishBlock<Deopt>({}, reason);
734  }

References current_block_, v8::internal::maglev::ReduceResult::DoneWithAbort(), and v8::internal::maglev::BasicBlock::set_deferred().

Referenced by BuildCheckHeapObject(), BuildCheckJSFunction(), BuildCheckJSReceiver(), BuildCheckJSReceiverOrNullOrUndefined(), BuildCheckMaps(), BuildCheckNumber(), BuildCheckSeqOneByteString(), BuildCheckSmi(), BuildCheckString(), BuildCheckStringOrOddball(), BuildCheckStringOrStringWrapper(), BuildCheckSymbol(), GetUint32ElementIndex(), TryBuildNamedAccess(), TryBuildPropertyCellLoad(), TryBuildPropertyCellStore(), TryBuildPropertyLoad(), TryBuildStoreField(), TrySpecializeStoreContextSlot(), VisitBinaryOperation(), VisitBinarySmiOperation(), VisitCompareOperation(), VisitSingleBytecode(), and VisitUnaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ EndLoopEffects()

void v8::internal::maglev::MaglevGraphBuilder::EndLoopEffects ( int  loop_header)
private

Definition at line 14433 of file maglev-graph-builder.cc.

14433  {
14435  DCHECK_EQ(loop_effects_->loop_header, loop_header);
14436  // TODO(olivf): Update merge states dominated by the loop header with
14437  // information we know to be unaffected by the loop.
14438  if (merge_states_[loop_header] && merge_states_[loop_header]->is_loop()) {
14440  }
14441  if (loop_effects_stack_.size() > 1) {
14442  LoopEffects* inner_effects = loop_effects_;
14443  loop_effects_ = *(loop_effects_stack_.end() - 2);
14444  loop_effects_->Merge(inner_effects);
14445  } else {
14446  loop_effects_ = nullptr;
14447  }
14448  loop_effects_stack_.pop_back();
14449 }

References DCHECK_EQ, and v8::internal::maglev::LoopEffects::Merge().

+ Here is the call graph for this function:

◆ EndPrologue()

BasicBlock * v8::internal::maglev::MaglevGraphBuilder::EndPrologue ( )

Definition at line 1051 of file maglev-graph-builder.cc.

1051  {
1052  BasicBlock* first_block;
1053  if (!is_inline() &&
1054  (v8_flags.maglev_hoist_osr_value_phi_untagging && graph_->is_osr())) {
1055  first_block =
1056  FinishBlock<CheckpointedJump>({}, &jump_targets_[entrypoint_]);
1057  } else {
1058  first_block = FinishBlock<Jump>({}, &jump_targets_[entrypoint_]);
1059  }
1060  MergeIntoFrameState(first_block, entrypoint_);
1061  return first_block;
1062 }
void MergeIntoFrameState(BasicBlock *block, int target)

References entrypoint_, graph_, is_inline(), v8::internal::maglev::Graph::is_osr(), jump_targets_, MergeIntoFrameState(), and v8::internal::v8_flags.

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ EnsureInt32() [1/2]

void v8::internal::maglev::MaglevGraphBuilder::EnsureInt32 ( interpreter::Register  reg)
inlineprivate

Definition at line 1587 of file maglev-graph-builder.h.

1587  {
1589  }

◆ EnsureInt32() [2/2]

void v8::internal::maglev::MaglevGraphBuilder::EnsureInt32 ( ValueNode value,
bool  can_be_heap_number = false 
)
inlineprivate

Definition at line 1581 of file maglev-graph-builder.h.

1581  {
1582  // Either the value is Int32 already, or we force a conversion to Int32 and
1583  // cache the value in its alternative representation node.
1584  GetInt32(value, can_be_heap_number);
1585  }

References v8::internal::value.

Referenced by BuildInt32BinarySmiOperationNode(), and TrySpecializeStoreContextSlot().

+ Here is the caller graph for this function:

◆ EnsureType() [1/2]

template<typename Function >
bool v8::internal::maglev::MaglevGraphBuilder::EnsureType ( ValueNode node,
NodeType  type,
Function  ensure_new_type 
)
inlineprivate

Definition at line 538 of file maglev-graph-builder.h.

538  {
539  return known_node_aspects().EnsureType<Function>(broker(), node, type,
540  ensure_new_type);
541  }
bool EnsureType(compiler::JSHeapBroker *broker, ValueNode *node, NodeType type, NodeType *old_type)

References broker(), v8::internal::maglev::KnownNodeAspects::EnsureType(), known_node_aspects(), and v8::internal::tracing::type.

+ Here is the call graph for this function:

◆ EnsureType() [2/2]

bool v8::internal::maglev::MaglevGraphBuilder::EnsureType ( ValueNode node,
NodeType  type,
NodeType old = nullptr 
)
inlineprivate

Definition at line 534 of file maglev-graph-builder.h.

534  {
535  return known_node_aspects().EnsureType(broker(), node, type, old);
536  }

References broker(), v8::internal::maglev::KnownNodeAspects::EnsureType(), known_node_aspects(), and v8::internal::tracing::type.

Referenced by BuildCheckHeapObject(), BuildCheckJSFunction(), BuildCheckJSReceiver(), BuildCheckJSReceiverOrNullOrUndefined(), BuildCheckNumber(), BuildCheckSeqOneByteString(), BuildCheckSmi(), BuildCheckString(), BuildCheckStringOrOddball(), BuildCheckStringOrStringWrapper(), BuildCheckSymbol(), BuildLoadFixedArrayLength(), BuildLoadJSFunctionContext(), BuildNumberOrOddballToFloat64(), BuildRegisterFrameInitialization(), BuildSmiUntag(), GetTruncatedInt32ForToNumber(), and TrySpecializeLoadContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ EscapeContext()

void v8::internal::maglev::MaglevGraphBuilder::EscapeContext ( )
private

Definition at line 180 of file maglev-graph-builder.cc.

180  {
181  ValueNode* context = GetContext();
182  if (InlinedAllocation* alloc = context->TryCast<InlinedAllocation>()) {
183  alloc->ForceEscaping();
184  }
185 }

References GetContext(), and v8::internal::maglev::NodeBase::TryCast().

+ Here is the call graph for this function:

◆ ExtendOrReallocateCurrentAllocationBlock()

InlinedAllocation * v8::internal::maglev::MaglevGraphBuilder::ExtendOrReallocateCurrentAllocationBlock ( AllocationType  allocation_type,
VirtualObject value 
)
private

Definition at line 13637 of file maglev-graph-builder.cc.

13638  {
13639  DCHECK_LE(vobject->size(), kMaxRegularHeapObjectSize);
13640  if (!current_allocation_block_ || v8_flags.maglev_allocation_folding == 0 ||
13641  current_allocation_block_->allocation_type() != allocation_type ||
13642  !v8_flags.inline_new || is_turbolev()) {
13644  AddNewNode<AllocationBlock>({}, allocation_type);
13645  }
13646 
13647  int current_size = current_allocation_block_->size();
13648  if (current_size + vobject->size() > kMaxRegularHeapObjectSize) {
13650  AddNewNode<AllocationBlock>({}, allocation_type);
13651  }
13652 
13653  DCHECK_GE(current_size, 0);
13654  InlinedAllocation* allocation =
13655  AddNewNode<InlinedAllocation>({current_allocation_block_}, vobject);
13656  graph()->allocations_escape_map().emplace(allocation, zone());
13657  current_allocation_block_->Add(allocation);
13658  vobject->set_allocation(allocation);
13659  return allocation;
13660 }
void Add(InlinedAllocation *alloc)
Definition: maglev-ir.h:6470

References DCHECK_GE, DCHECK_LE, graph(), v8::internal::kMaxRegularHeapObjectSize, v8::internal::maglev::VirtualObject::set_allocation(), v8::internal::maglev::VirtualObject::size(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ fast_hash_combine()

static size_t v8::internal::maglev::MaglevGraphBuilder::fast_hash_combine ( size_t  seed,
size_t  h 
)
inlinestaticprivate

Definition at line 3314 of file maglev-graph-builder.h.

3314  {
3315  // Implementation from boost. Good enough for GVN.
3316  return h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
3317  }
refactor address components for immediate indexing make OptimizeMaglevOnNextCall optimize to turbofan instead of maglev filter for tracing turbofan compilation trace turbo cfg trace TurboFan s graph trimmer trace TurboFan s control equivalence trace TurboFan s register allocator trace stack load store counters for optimized code in run fuzzing &&concurrent_recompilation trace_turbo trace_turbo_scheduled trace_turbo_stack_accesses verify TurboFan machine graph of code stubs path where to generate builtins effects h(used for mksnapshot only)") DEFINE_BOOL_READONLY(fixed_array_bounds_checks

References h().

Referenced by AddNewNodeOrGetEquivalent().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ feedback()

compiler::FeedbackVectorRef v8::internal::maglev::MaglevGraphBuilder::feedback ( ) const
inlineprivate

Definition at line 3109 of file maglev-graph-builder.h.

3109  {
3110  return compilation_unit_->feedback();
3111  }

Referenced by BuildGenericBinaryOperationNode(), BuildGenericBinarySmiOperationNode(), BuildGenericUnaryOperationNode(), and TryBuildNamedAccess().

+ Here is the caller graph for this function:

◆ FeedbackNexusForOperand()

const FeedbackNexus v8::internal::maglev::MaglevGraphBuilder::FeedbackNexusForOperand ( int  slot_operand_index) const
inlineprivate

Definition at line 3112 of file maglev-graph-builder.h.

3112  {
3113  return FeedbackNexus(feedback().object(),
3114  GetSlotOperand(slot_operand_index),
3115  broker()->feedback_nexus_config());
3116  }

References broker().

Referenced by VisitBinaryOperation(), VisitBinarySmiOperation(), VisitCompareOperation(), and VisitUnaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ FeedbackNexusForSlot()

const FeedbackNexus v8::internal::maglev::MaglevGraphBuilder::FeedbackNexusForSlot ( FeedbackSlot  slot) const
inlineprivate

Definition at line 3117 of file maglev-graph-builder.h.

3117  {
3118  return FeedbackNexus(feedback().object(), slot,
3119  broker()->feedback_nexus_config());
3120  }

References broker().

+ Here is the call graph for this function:

◆ FindContinuationForPolymorphicPropertyLoad()

std::optional< MaglevGraphBuilder::ContinuationOffsets > v8::internal::maglev::MaglevGraphBuilder::FindContinuationForPolymorphicPropertyLoad ( )
private

Definition at line 7285 of file maglev-graph-builder.cc.

7285  {
7286  if (!v8_flags.maglev_poly_calls) {
7287  return {};
7288  }
7289 
7290  if (iterator_.current_bytecode() !=
7291  interpreter::Bytecode::kGetNamedProperty) {
7292  return {};
7293  }
7294 
7295  interpreter::Register loaded_property_register =
7297 
7298  // For now, we only generate the continuation for this pattern:
7299  // GetNamedProperty ...
7300  // Sta-REG
7301  // CallProperty REG ...
7302 
7303  iterator_.Advance();
7304  switch (iterator_.current_bytecode()) {
7305 #define CASE(Name, ...) \
7306  case interpreter::Bytecode::k##Name: \
7307  loaded_property_register = \
7308  interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name); \
7309  break;
7310 
7312 #undef CASE
7313  default:
7314  return {};
7315  }
7316 
7317  iterator_.Advance();
7318  switch (iterator_.current_bytecode()) {
7319 #define CASE(Name, ...) \
7320  case interpreter::Bytecode::k##Name: \
7321  if (iterator_.GetRegisterOperand(0) == loaded_property_register) { \
7322  return ContinuationOffsets{iterator_.current_offset(), \
7323  iterator_.next_offset()}; \
7324  } \
7325  break;
7326 
7328 #undef CASE
7329 
7330  default:
7331  break;
7332  }
7333  return {};
7334 
7335  // TODO(marja): Add other possible continuations.
7336 
7337  // Restriction: the bytecodes which can end the continuation must write
7338  // their result in the accumulator.
7339  // TODO(marja): Remove this restriction. To do that, VisitGetNamedProperty
7340  // can't assume it should call SetAccumulator after TryBuildLoadNamedProperty.
7341 }
#define CALL_PROPERTY_BYTECODES(V)
Definition: bytecodes.h:42
#define SHORT_STAR_BYTECODE_LIST(V)
Definition: bytecodes.h:24
static constexpr Register virtual_accumulator()

References CALL_PROPERTY_BYTECODES, CASE, SHORT_STAR_BYTECODE_LIST, v8::internal::v8_flags, and v8::internal::interpreter::Register::virtual_accumulator().

+ Here is the call graph for this function:

◆ FinishBlock()

template<typename ControlNodeT , typename... Args>
BasicBlock* v8::internal::maglev::MaglevGraphBuilder::FinishBlock ( std::initializer_list< ValueNode * >  control_inputs,
Args &&...  args 
)
inlineprivate

Definition at line 1976 of file maglev-graph-builder.h.

1977  {
1978  ControlNodeT* control_node = NodeBase::New<ControlNodeT>(
1979  zone(), control_inputs.size(), std::forward<Args>(args)...);
1980  SetNodeInputs(control_node, control_inputs);
1981  AttachEagerDeoptInfo(control_node);
1982  AttachDeoptCheckpoint(control_node);
1983  static_assert(!ControlNodeT::kProperties.can_lazy_deopt());
1984  static_assert(!ControlNodeT::kProperties.can_throw());
1985  static_assert(!ControlNodeT::kProperties.can_write());
1986  control_node->set_owner(current_block_);
1987  current_block_->set_control_node(control_node);
1988  // Clear unobserved context slot stores when there is any controlflow.
1989  // TODO(olivf): More precision could be achieved by tracking dominating
1990  // stores within known_node_aspects. For this we could use a stack of
1991  // stores, which we push on split and pop on merge.
1993 
1994  // TODO(olivf): Support allocation folding across control flow.
1996 
1997  BasicBlock* block = current_block_;
1999  current_block_ = nullptr;
2000 
2001  graph()->Add(block);
2002  if (has_graph_labeller()) {
2004  BytecodeOffset(iterator_.current_offset()),
2006  if (v8_flags.trace_maglev_graph_building) {
2007  bool kSkipTargets = true;
2008  std::cout << " " << control_node << " "
2009  << PrintNodeLabel(graph_labeller(), control_node) << ": "
2010  << PrintNode(graph_labeller(), control_node, kSkipTargets)
2011  << std::endl;
2012  }
2013  }
2014  return block;
2015  }
void set_control_node(ControlNode *control_node)
void Add(BasicBlock *block)
Definition: maglev-graph.h:72

References v8::base::args, graph(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ FinishInlinedBlockForCaller()

BasicBlock * v8::internal::maglev::MaglevGraphBuilder::FinishInlinedBlockForCaller ( ControlNode control_node,
ZoneVector< Node * >  rem_nodes_in_call_block 
)

Definition at line 14280 of file maglev-graph-builder.cc.

14281  {
14282  BasicBlock* result = current_block_;
14283  result->nodes().reserve(node_buffer().size() +
14284  rem_nodes_in_call_block.size());
14286  current_block_ = nullptr;
14287  for (Node* n : rem_nodes_in_call_block) {
14288  n->set_owner(result);
14289  result->nodes().push_back(n);
14290  }
14291  control_node->set_owner(result);
14292  CHECK_NULL(result->control_node());
14293  result->set_control_node(control_node);
14294 
14295  // Add the final block to the graph.
14296  graph_->Add(result);
14297  return result;
14298 }
#define CHECK_NULL(val)

References CHECK_NULL, v8::base::internal::result, v8::internal::maglev::NodeBase::set_owner(), v8::internal::ZoneVector< T >::size(), and size().

Referenced by v8::internal::maglev::MaglevInliner::BuildInlineFunction().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ FlushNodesToBlock()

void v8::internal::maglev::MaglevGraphBuilder::FlushNodesToBlock ( )
inlineprivate

Definition at line 1966 of file maglev-graph-builder.h.

1966  {
1967  ZoneVector<Node*>& nodes = current_block_->nodes();
1968  size_t old_size = nodes.size();
1969  nodes.resize(old_size + node_buffer().size());
1970  std::copy(node_buffer().begin(), node_buffer().end(),
1971  nodes.begin() + old_size);
1972  node_buffer().clear();
1973  }
ZoneVector< Node * > & nodes()
Node::Uses::const_iterator begin(const Node::Uses &uses)
Definition: node.h:708

References v8::internal::ZoneVector< T >::begin(), v8::internal::compiler::begin(), v8::internal::compiler::end(), v8::internal::ZoneVector< T >::resize(), v8::internal::ZoneVector< T >::size(), and size().

+ Here is the call graph for this function:

◆ GetAccumulator()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetAccumulator ( )
inlineprivate

◆ GetAccumulatorHoleyFloat64ForToNumber()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetAccumulatorHoleyFloat64ForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1663 of file maglev-graph-builder.h.

1665  {
1667  interpreter::Register::virtual_accumulator(), allowed_input_type,
1668  conversion_type);
1669  }

Referenced by BuildFloat64BinaryOperationNodeForToNumber(), BuildFloat64BinarySmiOperationNodeForToNumber(), and BuildFloat64UnaryOperationNodeForToNumber().

+ Here is the caller graph for this function:

◆ GetAccumulatorSmi()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::GetAccumulatorSmi ( UseReprHintRecording  record_use_repr_hint = UseReprHintRecording::kRecord)
inlineprivate

Definition at line 1643 of file maglev-graph-builder.h.

1645  {
1647  record_use_repr_hint);
1648  }

References v8::debug::anonymous_namespace{debug-interface.cc}::GetSmiValue().

Referenced by TryBuildStoreField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetAccumulatorTruncatedInt32ForToNumber()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetAccumulatorTruncatedInt32ForToNumber ( NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1650 of file maglev-graph-builder.h.

1652  {
1654  interpreter::Register::virtual_accumulator(), allowed_input_type,
1655  conversion_type);
1656  }

◆ GetAccumulatorUint8ClampedForToNumber()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetAccumulatorUint8ClampedForToNumber ( )
inlineprivate

Definition at line 1658 of file maglev-graph-builder.h.

1658  {
1661  }
ValueNode * GetUint8ClampedForToNumber(ValueNode *value)

◆ GetArgument()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetArgument ( int  i)

Definition at line 1069 of file maglev-graph-builder.cc.

1069  {
1072  return current_interpreter_frame_.get(reg);
1073 }
#define DCHECK_LT(v1, v2)
Definition: logging.h:488

References current_interpreter_frame_, DCHECK_LT, v8::internal::interpreter::Register::FromParameterIndex(), v8::internal::maglev::InterpreterFrameState::get(), and parameter_count().

+ Here is the call graph for this function:

◆ GetArgumentsAsArrayOfValueNodes()

base::Vector< ValueNode * > v8::internal::maglev::MaglevGraphBuilder::GetArgumentsAsArrayOfValueNodes ( compiler::SharedFunctionInfoRef  shared,
const CallArguments args 
)
private

Definition at line 10895 of file maglev-graph-builder.cc.

10896  {
10897  // TODO(victorgomes): Investigate if we can avoid this copy.
10898  int arg_count = static_cast<int>(args.count());
10899  auto arguments = zone()->AllocateVector<ValueNode*>(arg_count + 1);
10900  arguments[0] = GetConvertReceiver(shared, args);
10901  for (int i = 0; i < arg_count; i++) {
10902  arguments[i + 1] = args[i];
10903  }
10904  return arguments;
10905 }
base::Vector< T > AllocateVector(size_t length)
Definition: zone.h:132

References v8::base::args, and v8::internal::anonymous_namespace{json-stringifier.cc}::i.

◆ GetBooleanConstant()

RootConstant* v8::internal::maglev::MaglevGraphBuilder::GetBooleanConstant ( bool  value)
inline

Definition at line 343 of file maglev-graph-builder.h.

343  {
344  return graph()->GetBooleanConstant(value);
345  }
RootConstant * GetBooleanConstant(bool value)
Definition: maglev-graph.h:239

References v8::internal::maglev::Graph::GetBooleanConstant(), graph(), and v8::internal::value.

Referenced by BuildTaggedEqual(), BuildTestUndetectable(), TryReduceCompareEqualAgainstConstant(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetCallerDeoptFrame()

DeoptFrame * v8::internal::maglev::MaglevGraphBuilder::GetCallerDeoptFrame ( )
private

Definition at line 1362 of file maglev-graph-builder.cc.

1362  {
1363  if (!is_inline()) return nullptr;
1364  return caller_details_->deopt_frame;
1365 }

References caller_details_, v8::internal::maglev::MaglevCallerDetails::deopt_frame, and is_inline().

Referenced by GetDeoptFrameForLazyDeoptHelper(), and GetLatestCheckpointedFrame().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetCatchBlockFrameState()

MergePointInterpreterFrameState* v8::internal::maglev::MaglevGraphBuilder::GetCatchBlockFrameState ( )
inlineprivate

Definition at line 1258 of file maglev-graph-builder.h.

1258  {
1260  return merge_states_[catch_block_stack_.top().handler];
1261  }

References catch_block_stack_, v8::internal::DCHECK(), IsInsideTryBlock(), and merge_states_.

Referenced by AttachExceptionHandlerInfo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetClosure()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetClosure ( ) const
inlineprivate

Definition at line 1452 of file maglev-graph-builder.h.

1452  {
1455  }

Referenced by BuildRegisterFrameInitialization(), GetDeoptFrameForEntryStackCheck(), GetDeoptFrameForLazyDeoptHelper(), and GetLatestCheckpointedFrame().

+ Here is the caller graph for this function:

◆ GetConstant()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetConstant ( compiler::ObjectRef  ref)
inline

Definition at line 346 of file maglev-graph-builder.h.

346  {
347  return graph()->GetConstant(ref);
348  }
ValueNode * GetConstant(compiler::ObjectRef ref)
Definition: maglev-graph.h:459

References v8::internal::maglev::Graph::GetConstant(), and graph().

Referenced by BuildCompareMaps(), BuildLoadField(), BuildLoadFixedArrayElement(), BuildLoadJSFunctionContext(), BuildLoadJSFunctionFeedbackCell(), BuildRegisterFrameInitialization(), BuildTransitionElementsKindAndCompareMaps(), SetKnownValue(), TryBuildNamedAccess(), TryBuildPropertyCellLoad(), TryBuildPropertyCellStore(), TryBuildPropertyLoad(), TryBuildScriptContextConstantLoad(), TryBuildScriptContextLoad(), TryBuildScriptContextStore(), TrySpecializeLoadContextSlot(), TrySpecializeLoadContextSlotToFunctionContext(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetConstantSingleCharacterStringFromCode()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::GetConstantSingleCharacterStringFromCode ( uint16_t  code)
private

Definition at line 7536 of file maglev-graph-builder.cc.

7537  {
7538  // Only handle the one-byte character case, which accesses roots.
7541  }
7542  return {};
7543 }
static constexpr RootIndex SingleCharacterStringIndex(int c)
Definition: roots.h:644
static const int32_t kMaxOneByteCharCode
Definition: string.h:512
force emit tier up logic from all non turbofan code

References code, v8::internal::String::kMaxOneByteCharCode, and v8::internal::RootsTable::SingleCharacterStringIndex().

+ Here is the call graph for this function:

◆ GetContext()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetContext ( ) const
inlineprivate

Definition at line 1457 of file maglev-graph-builder.h.

1457  {
1460  }

Referenced by Build(), BuildCallBuiltin(), BuildRegisterFrameInitialization(), CheckContextExtensions(), and EscapeContext().

+ Here is the caller graph for this function:

◆ GetContextAtDepth()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetContextAtDepth ( ValueNode context,
size_t  depth 
)
private

Definition at line 7735 of file maglev-graph-builder.cc.

7736  {
7737  MinimizeContextChainDepth(&context, &depth);
7738 
7740  compiler::OptionalContextRef maybe_ref =
7741  FunctionContextSpecialization::TryToRef(compilation_unit_, context,
7742  &depth);
7743  if (maybe_ref.has_value()) {
7744  context = GetConstant(maybe_ref.value());
7745  }
7746  }
7747 
7748  for (size_t i = 0; i < depth; i++) {
7751  EnsureType(context, NodeType::kContext);
7752  }
7753  return context;
7754 }
void MinimizeContextChainDepth(ValueNode **context, size_t *depth)

References v8::internal::kNoContextCells, and v8::internal::Context::PREVIOUS_INDEX.

Referenced by BuildLoadContextSlot(), BuildStoreContextSlot(), and CheckContextExtensions().

+ Here is the caller graph for this function:

◆ GetConvertReceiver()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetConvertReceiver ( compiler::SharedFunctionInfoRef  shared,
const CallArguments args 
)
private

Definition at line 10868 of file maglev-graph-builder.cc.

10869  {
10870  if (shared.native() || shared.language_mode() == LanguageMode::kStrict) {
10871  if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10872  return GetRootConstant(RootIndex::kUndefinedValue);
10873  } else {
10874  return args.receiver();
10875  }
10876  }
10877  if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
10878  return GetConstant(
10879  broker()->target_native_context().global_proxy_object(broker()));
10880  }
10881  ValueNode* receiver = args.receiver();
10882  if (CheckType(receiver, NodeType::kJSReceiver)) return receiver;
10883  if (compiler::OptionalHeapObjectRef maybe_constant =
10884  TryGetConstant(receiver)) {
10885  compiler::HeapObjectRef constant = maybe_constant.value();
10886  if (constant.IsNullOrUndefined()) {
10887  return GetConstant(
10888  broker()->target_native_context().global_proxy_object(broker()));
10889  }
10890  }
10891  return AddNewNode<ConvertReceiver>(
10892  {receiver}, broker()->target_native_context(), args.receiver_mode());
10893 }

References v8::base::args, broker(), v8::internal::kNullOrUndefined, v8::internal::kStrict, and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ GetCurrentCallFrequency()

float v8::internal::maglev::MaglevGraphBuilder::GetCurrentCallFrequency ( )
inlineprivate

Definition at line 3134 of file maglev-graph-builder.h.

3134  {
3135  if (!is_inline()) return 1.0f;
3137  }

◆ GetCurrentTryCatchBlock()

CatchBlockDetails v8::internal::maglev::MaglevGraphBuilder::GetCurrentTryCatchBlock ( )
inlineprivate

Definition at line 1263 of file maglev-graph-builder.h.

1263  {
1264  if (IsInsideTryBlock()) {
1265  // Inside a try-block.
1266  int offset = catch_block_stack_.top().handler;
1267  return {&jump_targets_[offset],
1268  merge_states_[offset]->exception_handler_was_used(), false, 0};
1269  }
1270  if (!is_inline()) {
1271  return CatchBlockDetails{};
1272  }
1273  return caller_details_->catch_block;
1274  }

References caller_details_, v8::internal::maglev::MaglevCallerDetails::catch_block, catch_block_stack_, v8::internal::maglev::MergePointInterpreterFrameState::exception_handler_was_used(), is_inline(), IsInsideTryBlock(), jump_targets_, and merge_states_.

Referenced by AttachExceptionHandlerInfo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetDeoptFrameForEagerCall()

DeoptFrame * v8::internal::maglev::MaglevGraphBuilder::GetDeoptFrameForEagerCall ( const MaglevCompilationUnit unit,
ValueNode closure,
base::Vector< ValueNode * >  args 
)
private

Definition at line 1341 of file maglev-graph-builder.cc.

1343  {
1344  // The parent resumes after the call, which is roughly equivalent to a lazy
1345  // deopt. Use the helper function directly so that we can mark the
1346  // accumulator as dead (since it'll be overwritten by this function's
1347  // return value anyway).
1348  // TODO(leszeks): This is true for our current set of
1349  // inlinings/continuations, but there might be cases in the future where it
1350  // isn't. We may need to store the relevant overwritten register in
1351  // LazyDeoptFrameScope.
1352  DCHECK(
1356  DeoptFrame* deopt_frame = zone()->New<DeoptFrame>(
1358  current_deopt_scope_, true));
1359  return AddInlinedArgumentsToDeoptFrame(deopt_frame, unit, closure, args);
1360 }
static bool WritesAccumulator(Bytecode bytecode)
Definition: bytecodes.h:703
DeoptFrame * AddInlinedArgumentsToDeoptFrame(DeoptFrame *deopt_frame, const MaglevCompilationUnit *unit, ValueNode *closure, base::Vector< ValueNode * > args)
DeoptFrame GetDeoptFrameForLazyDeoptHelper(interpreter::Register result_location, int result_size, DeoptFrameScope *scope, bool mark_accumulator_dead)

References AddInlinedArgumentsToDeoptFrame(), v8::base::args, v8::internal::interpreter::Bytecodes::ClobbersAccumulator(), v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), current_deopt_scope_, v8::internal::DCHECK(), GetDeoptFrameForLazyDeoptHelper(), v8::internal::interpreter::Register::invalid_value(), iterator_, v8::internal::Zone::New(), v8::internal::interpreter::Bytecodes::WritesAccumulator(), and zone().

+ Here is the call graph for this function:

◆ GetDeoptFrameForEntryStackCheck()

InterpretedDeoptFrame v8::internal::maglev::MaglevGraphBuilder::GetDeoptFrameForEntryStackCheck ( )
private

Definition at line 1510 of file maglev-graph-builder.cc.

1510  {
1513  DCHECK(!is_inline());
1514  entry_stack_check_frame_.emplace(
1516  zone()->New<CompactInterpreterFrameState>(
1520  GetClosure(), BytecodeOffset(bailout_for_entrypoint()),
1521  current_source_position_, nullptr);
1522 
1523  (*entry_stack_check_frame_)
1524  .frame_state()
1525  ->ForEachValue(
1527  [&](ValueNode* node, interpreter::Register) { AddDeoptUse(node); });
1528  AddDeoptUse((*entry_stack_check_frame_).closure());
1529  return *entry_stack_check_frame_;
1530 }
std::optional< InterpretedDeoptFrame > entry_stack_check_frame_

References AddDeoptUse(), bailout_for_entrypoint(), compilation_unit_, current_interpreter_frame_, v8::internal::interpreter::BytecodeArrayIterator::current_offset(), current_source_position_, v8::internal::DCHECK(), DCHECK_EQ, entry_stack_check_frame_, entrypoint_, GetClosure(), GetInLivenessFor(), graph_, is_inline(), v8::internal::maglev::Graph::is_osr(), iterator_, and zone().

Referenced by Build(), and GetLatestCheckpointedFrame().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetDeoptFrameForLazyDeopt()

DeoptFrame v8::internal::maglev::MaglevGraphBuilder::GetDeoptFrameForLazyDeopt ( interpreter::Register  result_location,
int  result_size 
)
private

Definition at line 1424 of file maglev-graph-builder.cc.

1425  {
1426  return GetDeoptFrameForLazyDeoptHelper(result_location, result_size,
1427  current_deopt_scope_, false);
1428 }

References current_deopt_scope_, and GetDeoptFrameForLazyDeoptHelper().

Referenced by AttachLazyDeoptInfo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetDeoptFrameForLazyDeoptHelper()

DeoptFrame v8::internal::maglev::MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper ( interpreter::Register  result_location,
int  result_size,
DeoptFrameScope scope,
bool  mark_accumulator_dead 
)
private

Definition at line 1430 of file maglev-graph-builder.cc.

1432  {
1433  if (scope == nullptr) {
1434  compiler::BytecodeLivenessState* liveness =
1435  zone()->New<compiler::BytecodeLivenessState>(*GetOutLiveness(), zone());
1436  // Remove result locations from liveness.
1437  if (result_location == interpreter::Register::virtual_accumulator()) {
1438  DCHECK_EQ(result_size, 1);
1439  liveness->MarkAccumulatorDead();
1440  mark_accumulator_dead = false;
1441  } else {
1442  DCHECK(!result_location.is_parameter());
1443  for (int i = 0; i < result_size; i++) {
1444  liveness->MarkRegisterDead(result_location.index() + i);
1445  }
1446  }
1447  // Explicitly drop the accumulator if needed.
1448  if (mark_accumulator_dead && liveness->AccumulatorIsLive()) {
1449  liveness->MarkAccumulatorDead();
1450  }
1452  InterpretedDeoptFrame ret(
1454  zone()->New<CompactInterpreterFrameState>(*compilation_unit_, liveness,
1456  GetClosure(), BytecodeOffset(iterator_.current_offset()),
1458  ret.frame_state()->ForEachValue(
1459  *compilation_unit_, [this](ValueNode* node, interpreter::Register reg) {
1460  // Receiver and closure values have to be materialized, even if
1461  // they don't otherwise escape.
1462  if (reg == interpreter::Register::receiver() ||
1464  node->add_use();
1465  } else {
1466  AddDeoptUse(node);
1467  }
1468  });
1469  AddDeoptUse(ret.closure());
1470  return ret;
1471  }
1472 
 1473  // Currently we only support builtin continuations for bytecodes that write
 1474  // to the accumulator.
1477 
1478 #ifdef DEBUG
1479  if (scope->data().tag() == DeoptFrame::FrameType::kBuiltinContinuationFrame) {
1480  const DeoptFrame::BuiltinContinuationFrameData& frame =
1482  .get<DeoptFrame::BuiltinContinuationFrameData>();
1483  if (frame.maybe_js_target) {
1484  int stack_parameter_count =
1485  Builtins::GetStackParameterCount(frame.builtin_id);
1486  // The deopt input value is passed by the deoptimizer, so shouldn't be a
1487  // parameter here.
1488  DCHECK_EQ(stack_parameter_count, frame.parameters.length() + 1);
1489  } else {
1490  CallInterfaceDescriptor descriptor =
1491  Builtins::CallInterfaceDescriptorFor(frame.builtin_id);
1492  // The deopt input value is passed by the deoptimizer, so shouldn't be a
1493  // parameter here.
1494  DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length() + 1);
1495  // The deopt input value is passed on the stack.
1496  DCHECK_GT(descriptor.GetStackParameterCount(), 0);
1497  }
1498  }
1499 #endif
1500 
1501  // Mark the accumulator dead in parent frames since we know that the
1502  // continuation will write it.
1503  return DeoptFrame(scope->data(),
1504  zone()->New<DeoptFrame>(GetDeoptFrameForLazyDeoptHelper(
1505  result_location, result_size, scope->parent(),
1506  scope->data().tag() ==
1508 }
constexpr const auto & get() const
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin)
Definition: builtins.cc:189
static V8_EXPORT_PRIVATE int GetStackParameterCount(Builtin builtin)
Definition: builtins.cc:160
static bool WritesOrClobbersAccumulator(Bytecode bytecode)
Definition: bytecodes.h:715
static constexpr Register receiver()
const compiler::BytecodeLivenessState * GetOutLiveness() const

References v8::internal::compiler::BytecodeLivenessState::AccumulatorIsLive(), v8::internal::maglev::ValueNode::add_use(), AddDeoptUse(), v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::builtin_id, v8::internal::Builtins::CallInterfaceDescriptorFor(), v8::internal::maglev::InterpretedDeoptFrame::closure(), compilation_unit_, v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), current_deopt_scope_, current_interpreter_frame_, v8::internal::interpreter::BytecodeArrayIterator::current_offset(), current_source_position_, v8::internal::maglev::MaglevGraphBuilder::DeoptFrameScope::data(), v8::internal::DCHECK(), DCHECK_EQ, DCHECK_GT, v8::internal::maglev::CompactInterpreterFrameState::ForEachValue(), v8::internal::maglev::InterpretedDeoptFrame::frame_state(), v8::internal::interpreter::Register::function_closure(), v8::base::DiscriminatedUnion< TagEnum, Ts >::get(), GetCallerDeoptFrame(), GetClosure(), GetOutLiveness(), v8::internal::CallInterfaceDescriptor::GetParameterCount(), v8::internal::CallInterfaceDescriptor::GetStackParameterCount(), v8::internal::Builtins::GetStackParameterCount(), v8::internal::interpreter::Register::index(), v8::internal::interpreter::Register::is_parameter(), iterator_, v8::internal::maglev::DeoptFrame::kBuiltinContinuationFrame, v8::internal::compiler::BytecodeLivenessState::MarkAccumulatorDead(), v8::internal::compiler::BytecodeLivenessState::MarkRegisterDead(), v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::maybe_js_target, v8::internal::Zone::New(), v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::parameters, v8::internal::maglev::MaglevGraphBuilder::DeoptFrameScope::parent(), v8::internal::interpreter::Register::receiver(), v8::internal::maglev::VirtualObjectList::Snapshot(), v8::base::DiscriminatedUnion< TagEnum, Ts >::tag(), v8::internal::interpreter::Register::virtual_accumulator(), v8::internal::maglev::InterpreterFrameState::virtual_objects(), 
v8::internal::interpreter::Bytecodes::WritesOrClobbersAccumulator(), and zone().

Referenced by GetDeoptFrameForEagerCall(), and GetDeoptFrameForLazyDeopt().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetFeedbackCell()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetFeedbackCell ( )
inlineprivate

Definition at line 1447 of file maglev-graph-builder.h.

1447  {
1448  return graph()->GetConstant(
1450  }
const MaglevCompilationUnit * GetTopLevelCompilationUnit() const
compiler::FeedbackCellRef feedback_cell() const

References graph().

+ Here is the call graph for this function:

◆ GetFlag16Operand()

uint32_t v8::internal::maglev::MaglevGraphBuilder::GetFlag16Operand ( int  operand_index) const
inlineprivate

Definition at line 1475 of file maglev-graph-builder.h.

1475  {
1476  return iterator_.GetFlag16Operand(operand_index);
1477  }

◆ GetFlag8Operand()

uint32_t v8::internal::maglev::MaglevGraphBuilder::GetFlag8Operand ( int  operand_index) const
inlineprivate

Definition at line 1471 of file maglev-graph-builder.h.

1471  {
1472  return iterator_.GetFlag8Operand(operand_index);
1473  }

◆ GetFloat64() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetFloat64 ( interpreter::Register  reg)
inlineprivate

Definition at line 1604 of file maglev-graph-builder.h.

1604  {
1606  }

◆ GetFloat64() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetFloat64 ( ValueNode value)
private

Definition at line 1970 of file maglev-graph-builder.cc.

1970  {
1972  return GetFloat64ForToNumber(value, NodeType::kNumber,
1974 }
ValueNode * GetFloat64ForToNumber(ValueNode *value, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References GetFloat64ForToNumber(), v8::internal::maglev::kFloat64, v8::internal::maglev::kOnlyNumber, RecordUseReprHintIfPhi(), and v8::internal::value.

Referenced by GetFloat64ForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetFloat64Constant() [1/2]

Float64Constant* v8::internal::maglev::MaglevGraphBuilder::GetFloat64Constant ( double  constant)
inline

Definition at line 334 of file maglev-graph-builder.h.

334  {
335  return graph()->GetFloat64Constant(constant);
336  }
Float64Constant * GetFloat64Constant(double constant)
Definition: maglev-graph.h:226

References v8::internal::maglev::Graph::GetFloat64Constant(), and graph().

Referenced by BuildFloat64BinarySmiOperationNodeForToNumber(), BuildFloat64UnaryOperationNodeForToNumber(), BuildLoadField(), BuildLoadFixedDoubleArrayElement(), GetFloat64ForToNumber(), and GetNumberConstant().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetFloat64Constant() [2/2]

Float64Constant* v8::internal::maglev::MaglevGraphBuilder::GetFloat64Constant ( Float64  constant)
inline

Definition at line 337 of file maglev-graph-builder.h.

337  {
338  return graph()->GetFloat64Constant(constant);
339  }

References v8::internal::maglev::Graph::GetFloat64Constant(), and graph().

+ Here is the call graph for this function:

◆ GetFloat64ForToNumber() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetFloat64ForToNumber ( interpreter::Register  reg,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1620 of file maglev-graph-builder.h.

1622  {
1624  allowed_input_type, conversion_type);
1625  }

◆ GetFloat64ForToNumber() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetFloat64ForToNumber ( ValueNode value,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2016 of file maglev-graph-builder.cc.

2018  {
2019  ValueRepresentation representation =
2020  value->properties().value_representation();
2021  if (representation == ValueRepresentation::kFloat64) return value;
2022 
2023  // Process constants first to avoid allocating NodeInfo for them.
2024  if (auto cst = TryGetFloat64Constant(value, conversion_type)) {
2025  return GetFloat64Constant(cst.value());
2026  }
 2027  // We could emit unconditional eager deopts for other kinds of constants, but
 2028  // it's not necessary; the appropriate checking conversion nodes will deopt.
2029 
2030  NodeInfo* node_info = GetOrCreateInfoFor(value);
2031  auto& alternative = node_info->alternative();
2032 
2033  if (ValueNode* alt = alternative.float64()) {
2034  return alt;
2035  }
2036 
2037  // This is called when converting inputs in AddNewNode. We might already have
2038  // an empty type for `value` here. Make sure we don't add unsafe conversion
2039  // nodes in that case by checking for the empty node type explicitly.
2040  // TODO(marja): The checks can be removed after we're able to bail out
2041  // earlier.
2042  switch (representation) {
2044  auto combined_type = IntersectType(allowed_input_type, node_info->type());
2045  if (!IsEmptyNodeType(node_info->type()) &&
2046  NodeTypeIs(combined_type, NodeType::kSmi)) {
 2048  // Get the float64 value of a Smi value via its int32 representation.
2048  return GetFloat64(GetInt32(value));
2049  }
2050  if (!IsEmptyNodeType(node_info->type()) &&
2051  NodeTypeIs(combined_type, NodeType::kNumber)) {
2052  // Number->Float64 conversions are exact alternatives, so they can
2053  // also become the canonical float64_alternative.
2054  return alternative.set_float64(BuildNumberOrOddballToFloat64(
2055  value, NodeType::kNumber,
2057  }
2058  if (!IsEmptyNodeType(node_info->type()) &&
2059  NodeTypeIs(combined_type, NodeType::kNumberOrOddball)) {
2060  // NumberOrOddball->Float64 conversions are not exact alternatives,
2061  // since they lose the information that this is an oddball, so they
2062  // can only become the canonical float64_alternative if they are a
2063  // known number (and therefore not oddball).
2064  return BuildNumberOrOddballToFloat64(value, combined_type,
2065  conversion_type);
2066  }
2067  // The type is impossible. We could generate an unconditional deopt here,
2068  // but it's too invasive. So we just generate a check which will always
2069  // deopt.
2070  return BuildNumberOrOddballToFloat64(value, allowed_input_type,
2071  conversion_type);
2072  }
2074  return alternative.set_float64(AddNewNode<ChangeInt32ToFloat64>({value}));
2076  return alternative.set_float64(
2077  AddNewNode<ChangeUint32ToFloat64>({value}));
2079  switch (allowed_input_type) {
2080  case NodeType::kSmi:
2081  case NodeType::kNumber:
2082  case NodeType::kNumberOrBoolean:
2083  // Number->Float64 conversions are exact alternatives, so they can
2084  // also become the canonical float64_alternative. The HoleyFloat64
2085  // representation can represent undefined but no other oddballs, so
2086  // booleans cannot occur here and kNumberOrBoolean can be grouped with
2087  // kNumber.
2088  return alternative.set_float64(
2089  AddNewNode<CheckedHoleyFloat64ToFloat64>({value}));
2090  case NodeType::kNumberOrOddball:
2091  // NumberOrOddball->Float64 conversions are not exact alternatives,
2092  // since they lose the information that this is an oddball, so they
2093  // cannot become the canonical float64_alternative.
2094  return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value});
2095  default:
2096  UNREACHABLE();
2097  }
2098  }
2100  return alternative.set_float64(
2101  AddNewNode<ChangeIntPtrToFloat64>({value}));
2103  UNREACHABLE();
2104  }
2105  UNREACHABLE();
2106 }
std::optional< double > TryGetFloat64Constant(ValueNode *value, TaggedToFloat64ConversionType conversion_type)
ValueNode * BuildNumberOrOddballToFloat64(ValueNode *node, NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References v8::internal::maglev::NodeInfo::alternative(), BuildNumberOrOddballToFloat64(), GetFloat64(), GetFloat64Constant(), GetInt32(), GetOrCreateInfoFor(), v8::internal::maglev::IntersectType(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::maglev::kOnlyNumber, v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, v8::internal::maglev::NodeTypeIs(), TryGetFloat64Constant(), v8::internal::maglev::NodeInfo::type(), v8::internal::UNREACHABLE(), and v8::internal::value.

Referenced by GetFloat64(), GetHoleyFloat64ForToNumber(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetHoleyFloat64()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetHoleyFloat64 ( ValueNode value,
bool  convert_hole_to_undefined 
)
private

◆ GetHoleyFloat64ForToNumber() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetHoleyFloat64ForToNumber ( interpreter::Register  reg,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1631 of file maglev-graph-builder.h.

1633  {
1635  allowed_input_type, conversion_type);
1636  }

◆ GetHoleyFloat64ForToNumber() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetHoleyFloat64ForToNumber ( ValueNode value,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 2108 of file maglev-graph-builder.cc.

2110  {
2112  ValueRepresentation representation =
2113  value->properties().value_representation();
 2114  // Ignore the hint if the value is already in HoleyFloat64 representation.
2115  if (representation == ValueRepresentation::kHoleyFloat64) return value;
2116  return GetFloat64ForToNumber(value, allowed_input_type, conversion_type);
2117 }

References GetFloat64ForToNumber(), v8::internal::maglev::kHoleyFloat64, RecordUseReprHintIfPhi(), and v8::internal::value.

+ Here is the call graph for this function:

◆ GetInlinedArgument()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetInlinedArgument ( int  i)

Definition at line 1075 of file maglev-graph-builder.cc.

1075  {
1076  DCHECK(is_inline());
1078  return caller_details_->arguments[i];
1079 }

References argument_count(), v8::internal::maglev::MaglevCallerDetails::arguments, caller_details_, v8::internal::DCHECK(), DCHECK_LT, v8::internal::anonymous_namespace{json-stringifier.cc}::i, and is_inline().

+ Here is the call graph for this function:

◆ GetInLiveness()

const compiler::BytecodeLivenessState* v8::internal::maglev::MaglevGraphBuilder::GetInLiveness ( ) const
inlineprivate

Definition at line 1889 of file maglev-graph-builder.h.

1889  {
1891  }

Referenced by GetLatestCheckpointedFrame().

+ Here is the caller graph for this function:

◆ GetInLivenessFor()

const compiler::BytecodeLivenessState* v8::internal::maglev::MaglevGraphBuilder::GetInLivenessFor ( int  offset) const
inlineprivate

Definition at line 1892 of file maglev-graph-builder.h.

1892  {
1893  return bytecode_analysis().GetInLivenessFor(offset);
1894  }
const BytecodeLivenessState * GetInLivenessFor(int offset) const

Referenced by BuildMergeStates(), and GetDeoptFrameForEntryStackCheck().

+ Here is the caller graph for this function:

◆ GetInt32()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetInt32 ( ValueNode value,
bool  can_be_heap_number = false 
)
private

Definition at line 1851 of file maglev-graph-builder.cc.

1852  {
1854 
1855  ValueRepresentation representation =
1856  value->properties().value_representation();
1857  if (representation == ValueRepresentation::kInt32) return value;
1858 
1859  // Process constants first to avoid allocating NodeInfo for them.
1860  if (auto cst = TryGetInt32Constant(value)) {
1861  return GetInt32Constant(cst.value());
1862  }
1863  // We could emit unconditional eager deopts for other kinds of constant, but
1864  // it's not necessary, the appropriate checking conversion nodes will deopt.
1865 
1866  NodeInfo* node_info = GetOrCreateInfoFor(value);
1867  auto& alternative = node_info->alternative();
1868 
1869  if (ValueNode* alt = alternative.int32()) {
1870  return alt;
1871  }
1872 
1873  switch (representation) {
1875  if (can_be_heap_number && !CheckType(value, NodeType::kSmi)) {
1876  return alternative.set_int32(AddNewNode<CheckedNumberToInt32>({value}));
1877  }
1878  return alternative.set_int32(BuildSmiUntag(value));
1879  }
1881  if (!IsEmptyNodeType(GetType(value)) && node_info->is_smi()) {
1882  return alternative.set_int32(
1883  AddNewNode<TruncateUint32ToInt32>({value}));
1884  }
1885  return alternative.set_int32(AddNewNode<CheckedUint32ToInt32>({value}));
1886  }
1888  // The check here will also work for the hole NaN, so we can treat
1889  // HoleyFloat64 as Float64.
1891  return alternative.set_int32(
1892  AddNewNode<CheckedTruncateFloat64ToInt32>({value}));
1893  }
1894 
1896  return alternative.set_int32(AddNewNode<CheckedIntPtrToInt32>({value}));
1897 
1899  UNREACHABLE();
1900  }
1901  UNREACHABLE();
1902 }

References v8::internal::maglev::NodeInfo::alternative(), BuildSmiUntag(), CheckType(), GetInt32Constant(), GetOrCreateInfoFor(), GetType(), v8::internal::maglev::NodeInfo::is_smi(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, RecordUseReprHintIfPhi(), TryGetInt32Constant(), v8::internal::UNREACHABLE(), and v8::internal::value.

Referenced by GetFloat64ForToNumber(), GetInt32ElementIndex(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetInt32Constant()

Int32Constant* v8::internal::maglev::MaglevGraphBuilder::GetInt32Constant ( int32_t  constant)
inline

Definition at line 325 of file maglev-graph-builder.h.

325  {
326  return graph()->GetInt32Constant(constant);
327  }
Int32Constant * GetInt32Constant(int32_t constant)
Definition: maglev-graph.h:214

References v8::internal::maglev::Graph::GetInt32Constant(), and graph().

Referenced by BuildInt32BinarySmiOperationNode(), BuildLoadFixedDoubleArrayElement(), BuildTruncatingInt32BinarySmiOperationNodeForToNumber(), GetInt32(), GetInt32ElementIndex(), GetNumberConstant(), GetTruncatedInt32ForToNumber(), GetUint8ClampedForToNumber(), TryBuildNewConsString(), TryFoldInt32BinaryOperation(), and TryFoldInt32UnaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetInt32ElementIndex() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetInt32ElementIndex ( interpreter::Register  reg)
inlineprivate

Definition at line 2451 of file maglev-graph-builder.h.

2451  {
2452  ValueNode* index_object = current_interpreter_frame_.get(reg);
2453  return GetInt32ElementIndex(index_object);
2454  }
ValueNode * GetInt32ElementIndex(interpreter::Register reg)

Referenced by GetUint32ElementIndex().

+ Here is the caller graph for this function:

◆ GetInt32ElementIndex() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetInt32ElementIndex ( ValueNode index_object)
private

Definition at line 6055 of file maglev-graph-builder.cc.

6055  {
6057 
6058  switch (object->properties().value_representation()) {
6060  return AddNewNode<CheckedIntPtrToInt32>({object});
6062  NodeType old_type;
6063  if (SmiConstant* constant = object->TryCast<SmiConstant>()) {
6064  return GetInt32Constant(constant->value().value());
6065  } else if (CheckType(object, NodeType::kSmi, &old_type)) {
6066  auto& alternative = GetOrCreateInfoFor(object)->alternative();
6067  return alternative.get_or_set_int32(
6068  [&]() { return BuildSmiUntag(object); });
6069  } else {
6070  // TODO(leszeks): Cache this knowledge/converted value somehow on
6071  // the node info.
6072  return AddNewNode<CheckedObjectToIndex>({object},
6073  GetCheckType(old_type));
6074  }
6076  // Already good.
6077  return object;
6081  return GetInt32(object);
6082  }
6083 }
const AlternativeNodes & alternative() const

References v8::internal::maglev::NodeInfo::alternative(), BuildSmiUntag(), CheckType(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetInt32(), GetInt32Constant(), GetOrCreateInfoFor(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, v8::internal::maglev::NodeBase::properties(), RecordUseReprHintIfPhi(), v8::internal::maglev::NodeBase::TryCast(), and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ GetInternalizedString()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetInternalizedString ( interpreter::Register  reg)
private

Definition at line 1659 of file maglev-graph-builder.cc.

1660  {
1661  ValueNode* node = current_interpreter_frame_.get(reg);
1662  NodeType old_type;
1663  if (CheckType(node, NodeType::kInternalizedString, &old_type)) return node;
1664  NodeInfo* known_info = GetOrCreateInfoFor(node);
1665  if (known_info->alternative().checked_value()) {
1666  node = known_info->alternative().checked_value();
1667  if (CheckType(node, NodeType::kInternalizedString, &old_type)) return node;
1668  }
1669 
1670  if (!NodeTypeIs(old_type, NodeType::kString)) {
1671  known_info->IntersectType(NodeType::kString);
1672  }
1673 
1674  // This node may unwrap ThinStrings.
1675  ValueNode* maybe_unwrapping_node =
1676  AddNewNode<CheckedInternalizedString>({node}, GetCheckType(old_type));
1677  known_info->alternative().set_checked_value(maybe_unwrapping_node);
1678 
1679  current_interpreter_frame_.set(reg, maybe_unwrapping_node);
1680  return maybe_unwrapping_node;
1681 }

References v8::internal::maglev::NodeInfo::alternative(), CheckType(), current_interpreter_frame_, v8::internal::maglev::InterpreterFrameState::get(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetCheckType(), GetOrCreateInfoFor(), v8::internal::maglev::NodeInfo::IntersectType(), v8::internal::maglev::NodeTypeIs(), and v8::internal::maglev::InterpreterFrameState::set().

Referenced by VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetIntPtrConstant()

IntPtrConstant* v8::internal::maglev::MaglevGraphBuilder::GetIntPtrConstant ( intptr_t  constant)
inline

Definition at line 328 of file maglev-graph-builder.h.

328  {
329  return graph()->GetIntPtrConstant(constant);
330  }
IntPtrConstant * GetIntPtrConstant(intptr_t constant)
Definition: maglev-graph.h:218

References v8::internal::maglev::Graph::GetIntPtrConstant(), and graph().

+ Here is the call graph for this function:

◆ GetLatestCheckpointedFrame()

DeoptFrame v8::internal::maglev::MaglevGraphBuilder::GetLatestCheckpointedFrame ( )

Definition at line 1367 of file maglev-graph-builder.cc.

1367  {
1368  if (in_prologue_) {
1370  }
1373  latest_checkpointed_frame_.emplace(InterpretedDeoptFrame(
1375  zone()->New<CompactInterpreterFrameState>(
1377  GetClosure(), BytecodeOffset(iterator_.current_offset()),
1379 
1380  latest_checkpointed_frame_->as_interpreted().frame_state()->ForEachValue(
1382  [&](ValueNode* node, interpreter::Register) { AddDeoptUse(node); });
1383  AddDeoptUse(latest_checkpointed_frame_->as_interpreted().closure());
1384 
1385  // Skip lazy deopt builtin continuations.
1386  const DeoptFrameScope* deopt_scope = current_deopt_scope_;
1387  while (deopt_scope != nullptr &&
1388  deopt_scope->IsLazyDeoptContinuationFrame()) {
1389  deopt_scope = deopt_scope->parent();
1390  }
1391 
1392  if (deopt_scope != nullptr) {
1393  // Support exactly one eager deopt builtin continuation. This can be
1394  // expanded in the future if necessary.
1395  DCHECK_NULL(deopt_scope->parent());
1396  DCHECK_EQ(deopt_scope->data().tag(),
1398 #ifdef DEBUG
1399  if (deopt_scope->data().tag() ==
1401  const DeoptFrame::BuiltinContinuationFrameData& frame =
1402  deopt_scope->data().get<DeoptFrame::BuiltinContinuationFrameData>();
1403  if (frame.maybe_js_target) {
1404  int stack_parameter_count =
1405  Builtins::GetStackParameterCount(frame.builtin_id);
1406  DCHECK_EQ(stack_parameter_count, frame.parameters.length());
1407  } else {
1408  CallInterfaceDescriptor descriptor =
1409  Builtins::CallInterfaceDescriptorFor(frame.builtin_id);
1410  DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length());
1411  }
1412  }
1413 #endif
1414 
1415  // Wrap the above frame in the scope frame.
1417  deopt_scope->data(),
1418  zone()->New<DeoptFrame>(*latest_checkpointed_frame_));
1419  }
1420  }
1422 }
const compiler::BytecodeLivenessState * GetInLiveness() const

References AddDeoptUse(), v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::builtin_id, v8::internal::Builtins::CallInterfaceDescriptorFor(), compilation_unit_, current_deopt_scope_, current_interpreter_frame_, v8::internal::interpreter::BytecodeArrayIterator::current_offset(), current_source_position_, v8::internal::maglev::MaglevGraphBuilder::DeoptFrameScope::data(), DCHECK_EQ, DCHECK_NULL, v8::base::DiscriminatedUnion< TagEnum, Ts >::get(), GetCallerDeoptFrame(), GetClosure(), GetDeoptFrameForEntryStackCheck(), GetInLiveness(), v8::internal::CallInterfaceDescriptor::GetParameterCount(), v8::internal::Builtins::GetStackParameterCount(), in_prologue_, v8::internal::maglev::MaglevGraphBuilder::DeoptFrameScope::IsLazyDeoptContinuationFrame(), iterator_, v8::internal::maglev::DeoptFrame::kBuiltinContinuationFrame, latest_checkpointed_frame_, v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::maybe_js_target, v8::internal::maglev::DeoptFrame::BuiltinContinuationFrameData::parameters, v8::internal::maglev::MaglevGraphBuilder::DeoptFrameScope::parent(), v8::internal::maglev::VirtualObjectList::Snapshot(), v8::base::DiscriminatedUnion< TagEnum, Ts >::tag(), v8::internal::maglev::InterpreterFrameState::virtual_objects(), and zone().

Referenced by AttachDeoptCheckpoint(), AttachEagerDeoptInfo(), v8::internal::maglev::MergePointInterpreterFrameState::MergeLoop(), and v8::internal::maglev::MergePointInterpreterFrameState::TryMergeLoop().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetModifiableObjectFromAllocation()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::GetModifiableObjectFromAllocation ( InlinedAllocation allocation)
private

Definition at line 5200 of file maglev-graph-builder.cc.

5201  {
5202  VirtualObject* vobject = allocation->object();
5203  // If it hasn't been snapshotted yet, it is the latest created version of this
5204  // object and we can still modify it, we don't need to copy it.
5205  if (vobject->IsSnapshot()) {
5206  return DeepCopyVirtualObject(
5208  allocation));
5209  }
5210  return vobject;
5211 }
VirtualObject * DeepCopyVirtualObject(VirtualObject *vobj)

References current_interpreter_frame_, DeepCopyVirtualObject(), v8::internal::maglev::VirtualObjectList::FindAllocatedWith(), v8::internal::maglev::VirtualObject::IsSnapshot(), v8::internal::maglev::InlinedAllocation::object(), and v8::internal::maglev::InterpreterFrameState::virtual_objects().

Referenced by TryBuildStoreTaggedFieldToAllocation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetNumberConstant()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetNumberConstant ( double  constant)

Definition at line 2511 of file maglev-graph-builder.cc.

2511  {
2512  if (IsSmiDouble(constant)) {
2513  return GetInt32Constant(FastD2I(constant));
2514  }
2515  return GetFloat64Constant(constant);
2516 }
bool IsSmiDouble(double value)
int FastD2I(double x)
Definition: conversions.h:101

References v8::internal::FastD2I(), GetFloat64Constant(), GetInt32Constant(), and v8::internal::IsSmiDouble().

Referenced by TryFoldFloat64BinaryOperationForToNumber(), and TryFoldFloat64UnaryOperationForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetObjectFromAllocation()

VirtualObject * v8::internal::maglev::MaglevGraphBuilder::GetObjectFromAllocation ( InlinedAllocation allocation)
private

Definition at line 5188 of file maglev-graph-builder.cc.

5189  {
5190  VirtualObject* vobject = allocation->object();
5191  // If it hasn't been snapshotted yet, it is the latest created version of this
5192  // object, we don't need to search for it.
5193  if (vobject->IsSnapshot()) {
5195  allocation);
5196  }
5197  return vobject;
5198 }

References current_interpreter_frame_, v8::internal::maglev::VirtualObjectList::FindAllocatedWith(), v8::internal::maglev::VirtualObject::IsSnapshot(), v8::internal::maglev::InlinedAllocation::object(), and v8::internal::maglev::InterpreterFrameState::virtual_objects().

Referenced by BuildLoadFixedArrayElement(), and BuildLoadFixedDoubleArrayElement().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetOrCreateInfoFor()

NodeInfo* v8::internal::maglev::MaglevGraphBuilder::GetOrCreateInfoFor ( ValueNode node)
inlineprivate

Definition at line 545 of file maglev-graph-builder.h.

545  {
546  return known_node_aspects().GetOrCreateInfoFor(broker(), node);
547  }
NodeInfo * GetOrCreateInfoFor(compiler::JSHeapBroker *broker, ValueNode *node)

References broker(), v8::internal::maglev::KnownNodeAspects::GetOrCreateInfoFor(), and known_node_aspects().

Referenced by BuildCheckMaps(), BuildCompareMaps(), BuildLoadField(), BuildLoadJSArrayLength(), BuildStoreMap(), BuildTransitionElementsKindAndCompareMaps(), BuildTransitionElementsKindOrCheckMap(), CanElideWriteBarrier(), GetFloat64ForToNumber(), GetInt32(), GetInt32ElementIndex(), GetInternalizedString(), GetSmiValue(), GetTaggedValue(), GetTruncatedInt32ForToNumber(), and SetKnownValue().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetOutLiveness()

const compiler::BytecodeLivenessState* v8::internal::maglev::MaglevGraphBuilder::GetOutLiveness ( ) const
inlineprivate

Definition at line 1895 of file maglev-graph-builder.h.

1895  {
1897  }
const compiler::BytecodeLivenessState * GetOutLivenessFor(int offset) const

Referenced by GetDeoptFrameForLazyDeoptHelper().

+ Here is the caller graph for this function:

◆ GetOutLivenessFor()

const compiler::BytecodeLivenessState* v8::internal::maglev::MaglevGraphBuilder::GetOutLivenessFor ( int  offset) const
inlineprivate

Definition at line 1898 of file maglev-graph-builder.h.

1898  {
1899  return bytecode_analysis().GetOutLivenessFor(offset);
1900  }
const BytecodeLivenessState * GetOutLivenessFor(int offset) const

◆ GetRefOperand()

template<class T >
compiler::ref_traits<T>::ref_type v8::internal::maglev::MaglevGraphBuilder::GetRefOperand ( int  operand_index)
inlineprivate

Definition at line 1480 of file maglev-graph-builder.h.

1482  {
1483  // The BytecodeArray itself was fetched by using a barrier so all reads
1484  // from the constant pool are safe.
1485  return MakeRefAssumeMemoryFence(
1486  broker(), broker()->CanonicalPersistentHandle(
1488  operand_index, local_isolate()))));
1489  }
Handle< Object > GetConstantForIndexOperand(int operand_index, IsolateT *isolate) const

References broker(), and v8::internal::compiler::MakeRefAssumeMemoryFence().

+ Here is the call graph for this function:

◆ GetRegisterInput()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetRegisterInput ( Register  reg)
inlineprivate

Definition at line 1493 of file maglev-graph-builder.h.

1493  {
1494  DCHECK(!graph_->register_inputs().has(reg));
1495  graph_->register_inputs().set(reg);
1496  return AddNewNode<RegisterInput>({}, reg);
1497  }
constexpr void set(RegisterT reg)
Definition: reglist-base.h:47
constexpr bool has(RegisterT reg) const
Definition: reglist-base.h:57

References DCHECK.

Referenced by BuildRegisterFrameInitialization().

+ Here is the caller graph for this function:

◆ GetResultLocationAndSize()

std::pair< interpreter::Register, int > v8::internal::maglev::MaglevGraphBuilder::GetResultLocationAndSize ( ) const
private

Definition at line 1286 of file maglev-graph-builder.cc.

1286  {
1291  // TODO(leszeks): Only emit these cases for bytecodes we know can lazy deopt.
1292  switch (bytecode) {
1293 #define CASE(Name, ...) \
1294  case Bytecode::k##Name: \
1295  return GetResultLocationAndSizeForBytecode<Bytecode::k##Name, \
1296  __VA_ARGS__>(iterator_);
1297  BYTECODE_LIST(CASE, CASE)
1298 #undef CASE
1299  }
1300  UNREACHABLE();
1301 }
#define BYTECODE_LIST(V, V_TSA)
Definition: bytecodes.h:490

References bytecode(), BYTECODE_LIST, CASE, v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), iterator_, and v8::internal::UNREACHABLE().

Referenced by AttachLazyDeoptInfo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetRootConstant()

RootConstant* v8::internal::maglev::MaglevGraphBuilder::GetRootConstant ( RootIndex  index)
inline

Definition at line 340 of file maglev-graph-builder.h.

340  {
341  return graph()->GetRootConstant(index);
342  }
RootConstant * GetRootConstant(RootIndex index)
Definition: maglev-graph.h:235

References v8::internal::maglev::Graph::GetRootConstant(), graph(), and v8::internal::index.

Referenced by BuildLoadFixedArrayElement(), BuildLoadFixedDoubleArrayElement(), BuildNewConsStringMap(), BuildRegisterFrameInitialization(), BuildTaggedEqual(), TryBuildPropertyLoad(), TryReduceTypeOf(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetSecondValue()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetSecondValue ( ValueNode result)
inlineprivate

Definition at line 1732 of file maglev-graph-builder.h.

1732  {
1733  // GetSecondReturnedValue must be added just after a node that calls a
1734  // builtin that expects 2 returned values. It simply binds kReturnRegister1
1735  // to a value node. Since the previous node must have been a builtin
1736  // call, the register is available in the register allocator. No gap moves
1737  // would be emitted between these two nodes.
1738  if (result->opcode() == Opcode::kCallRuntime) {
1739  DCHECK_EQ(result->Cast<CallRuntime>()->ReturnCount(), 2);
1740  } else if (result->opcode() == Opcode::kCallBuiltin) {
1741  DCHECK_EQ(result->Cast<CallBuiltin>()->ReturnCount(), 2);
1742  } else {
1743  DCHECK_EQ(result->opcode(), Opcode::kForInPrepare);
1744  }
1745  // {result} must be the last node in the current block.
1746  DCHECK_EQ(node_buffer().back(), result);
1747  return AddNewNode<GetSecondReturnedValue>({});
1748  }

References DCHECK_EQ, v8::base::internal::result, v8::internal::maglev::CallBuiltin::ReturnCount(), and v8::internal::maglev::CallRuntime::ReturnCount().

+ Here is the call graph for this function:

◆ GetSilencedNaN()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetSilencedNaN ( ValueNode value)
inlineprivate

Definition at line 1671 of file maglev-graph-builder.h.

1671  {
1672  DCHECK_EQ(value->properties().value_representation(),
1674 
1675  // We only need to check for silenced NaN in non-conversion nodes or
1676  // conversion from tagged, since they can't be signalling NaNs.
1677  if (value->properties().is_conversion()) {
1678  // A conversion node should have at least one input.
1679  DCHECK_GE(value->input_count(), 1);
1680  // If the conversion node is tagged, we could be reading a fabricated sNaN
1681  // value (built using a BufferArray for example).
1682  if (!value->input(0).node()->properties().is_tagged()) {
1683  return value;
1684  }
1685  }
1686 
1687  // Special case constants, since we know what they are.
1688  Float64Constant* constant = value->TryCast<Float64Constant>();
1689  if (constant) {
1690  constexpr double quiet_NaN = std::numeric_limits<double>::quiet_NaN();
1691  if (!constant->value().is_nan()) return constant;
1692  return GetFloat64Constant(quiet_NaN);
1693  }
1694 
1695  // Silence all other values.
1696  return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value});
1697  }

References DCHECK_EQ, DCHECK_GE, v8::internal::Float64::is_nan(), v8::internal::maglev::Float64Constant::value(), and v8::internal::value.

+ Here is the call graph for this function:

◆ GetSlotOperand()

FeedbackSlot v8::internal::maglev::MaglevGraphBuilder::GetSlotOperand ( int  operand_index) const
inlineprivate

Definition at line 1467 of file maglev-graph-builder.h.

1467  {
1468  return iterator_.GetSlotOperand(operand_index);
1469  }
FeedbackSlot GetSlotOperand(int operand_index) const

Referenced by BuildGenericBinaryOperationNode(), BuildGenericBinarySmiOperationNode(), and BuildGenericUnaryOperationNode().

+ Here is the caller graph for this function:

◆ GetSmiConstant()

SmiConstant* v8::internal::maglev::MaglevGraphBuilder::GetSmiConstant ( int  constant) const
inline

Definition at line 319 of file maglev-graph-builder.h.

319  {
320  return graph()->GetSmiConstant(constant);
321  }
SmiConstant * GetSmiConstant(int constant)
Definition: maglev-graph.h:204

References v8::internal::maglev::Graph::GetSmiConstant(), and graph().

Referenced by BuildGenericBinarySmiOperationNode(), and GetTaggedValue().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetSmiValue() [1/2]

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::GetSmiValue ( interpreter::Register  reg,
UseReprHintRecording  record_use_repr_hint = UseReprHintRecording::kRecord 
)
inlineprivate

Definition at line 1527 of file maglev-graph-builder.h.

1529  {
1530  ValueNode* value = current_interpreter_frame_.get(reg);
1531  return GetSmiValue(value, record_use_repr_hint);
1532  }

References v8::debug::anonymous_namespace{debug-interface.cc}::GetSmiValue(), and v8::internal::value.

+ Here is the call graph for this function:

◆ GetSmiValue() [2/2]

ReduceResult v8::internal::maglev::MaglevGraphBuilder::GetSmiValue ( ValueNode value,
UseReprHintRecording  record_use_repr_hint = UseReprHintRecording::kRecord 
)
private

Definition at line 1596 of file maglev-graph-builder.cc.

1597  {
1598  if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
1600  }
1601 
1602  NodeInfo* node_info = GetOrCreateInfoFor(value);
1603 
1604  ValueRepresentation representation =
1605  value->properties().value_representation();
1606  if (representation == ValueRepresentation::kTagged) {
1607  return BuildCheckSmi(value, !value->Is<Phi>());
1608  }
1609 
1610  auto& alternative = node_info->alternative();
1611 
1612  if (ValueNode* alt = alternative.tagged()) {
1613  // HoleyFloat64ToTagged does not canonicalize Smis by default, since it can
1614  // be expensive. If we are reading a Smi value, we should try to
1615  // canonicalize now.
1616  if (HoleyFloat64ToTagged* conversion_node =
1617  alt->TryCast<HoleyFloat64ToTagged>()) {
1618  conversion_node->SetMode(
1620  }
1621  return BuildCheckSmi(alt, !value->Is<Phi>());
1622  }
1623 
1624  switch (representation) {
1626  if (NodeTypeIsSmi(node_info->type())) {
1627  return alternative.set_tagged(AddNewNode<UnsafeSmiTagInt32>({value}));
1628  }
1629  return alternative.set_tagged(AddNewNode<CheckedSmiTagInt32>({value}));
1630  }
1632  if (NodeTypeIsSmi(node_info->type())) {
1633  return alternative.set_tagged(AddNewNode<UnsafeSmiTagUint32>({value}));
1634  }
1635  return alternative.set_tagged(AddNewNode<CheckedSmiTagUint32>({value}));
1636  }
1638  return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
1639  }
1641  return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
1642  }
1644  return alternative.set_tagged(AddNewNode<CheckedSmiTagIntPtr>({value}));
1646  UNREACHABLE();
1647  }
1648  UNREACHABLE();
1649 }
#define V8_LIKELY(condition)
Definition: v8config.h:661

References v8::internal::maglev::NodeInfo::alternative(), BuildCheckSmi(), GetOrCreateInfoFor(), v8::internal::maglev::HoleyFloat64ToTagged::kCanonicalizeSmi, v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::maglev::kRecord, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, RecordUseReprHintIfPhi(), v8::internal::maglev::NodeInfo::type(), v8::internal::UNREACHABLE(), V8_LIKELY, and v8::internal::value.

Referenced by TryBuildPropertyCellStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetTaggedIndexConstant()

TaggedIndexConstant* v8::internal::maglev::MaglevGraphBuilder::GetTaggedIndexConstant ( int  constant)
inline

Definition at line 322 of file maglev-graph-builder.h.

322  {
323  return graph()->GetTaggedIndexConstant(constant);
324  }
TaggedIndexConstant * GetTaggedIndexConstant(int constant)
Definition: maglev-graph.h:209

References v8::internal::maglev::Graph::GetTaggedIndexConstant(), and graph().

+ Here is the call graph for this function:

◆ GetTaggedValue() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetTaggedValue ( interpreter::Register  reg,
UseReprHintRecording  record_use_repr_hint = UseReprHintRecording::kRecord 
)
inlineprivate

Definition at line 1534 of file maglev-graph-builder.h.

1536  {
1537  ValueNode* value = current_interpreter_frame_.get(reg);
1538  return GetTaggedValue(value, record_use_repr_hint);
1539  }

References v8::internal::value.

◆ GetTaggedValue() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetTaggedValue ( ValueNode value,
UseReprHintRecording  record_use_repr_hint = UseReprHintRecording::kRecord 
)
private

Definition at line 1532 of file maglev-graph-builder.cc.

1533  {
1534  if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
1536  }
1537 
1538  ValueRepresentation representation =
1539  value->properties().value_representation();
1540  if (representation == ValueRepresentation::kTagged) return value;
1541 
1542  if (Int32Constant* as_int32_constant = value->TryCast<Int32Constant>();
1543  as_int32_constant && Smi::IsValid(as_int32_constant->value())) {
1544  return GetSmiConstant(as_int32_constant->value());
1545  }
1546 
1547  NodeInfo* node_info = GetOrCreateInfoFor(value);
1548  auto& alternative = node_info->alternative();
1549 
1550  if (ValueNode* alt = alternative.tagged()) {
1551  return alt;
1552  }
1553 
1554  // This is called when converting inputs in AddNewNode. We might already have
1555  // an empty type for `value` here. Make sure we don't add unsafe conversion
1556  // nodes in that case by checking for the empty node type explicitly.
1557  // TODO(marja): The checks can be removed after we're able to bail out
1558  // earlier.
1559  switch (representation) {
1561  if (!IsEmptyNodeType(node_info->type()) &&
1562  NodeTypeIsSmi(node_info->type())) {
1563  return alternative.set_tagged(AddNewNode<UnsafeSmiTagInt32>({value}));
1564  }
1565  return alternative.set_tagged(AddNewNode<Int32ToNumber>({value}));
1566  }
1568  if (!IsEmptyNodeType(node_info->type()) &&
1569  NodeTypeIsSmi(node_info->type())) {
1570  return alternative.set_tagged(AddNewNode<UnsafeSmiTagUint32>({value}));
1571  }
1572  return alternative.set_tagged(AddNewNode<Uint32ToNumber>({value}));
1573  }
1575  return alternative.set_tagged(AddNewNode<Float64ToTagged>(
1577  }
1579  return alternative.set_tagged(AddNewNode<HoleyFloat64ToTagged>(
1581  }
1582 
1584  if (!IsEmptyNodeType(node_info->type()) &&
1585  NodeTypeIsSmi(node_info->type())) {
1586  return alternative.set_tagged(AddNewNode<UnsafeSmiTagIntPtr>({value}));
1587  }
1588  return alternative.set_tagged(AddNewNode<IntPtrToNumber>({value}));
1589 
1591  UNREACHABLE();
1592  }
1593  UNREACHABLE();
1594 }
static constexpr bool IsValid(T value) requires(std
Definition: smi.h:75

References v8::internal::maglev::NodeInfo::alternative(), GetOrCreateInfoFor(), GetSmiConstant(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::Smi::IsValid(), v8::internal::maglev::Float64ToTagged::kCanonicalizeSmi, v8::internal::maglev::kFloat64, v8::internal::maglev::HoleyFloat64ToTagged::kForceHeapNumber, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::maglev::kRecord, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, RecordUseReprHintIfPhi(), v8::internal::maglev::NodeInfo::type(), v8::internal::UNREACHABLE(), V8_LIKELY, and v8::internal::value.

Referenced by BuildTaggedEqual(), TryBuildNamedAccess(), and VisitCompareOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetTruncatedInt32ForToNumber() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetTruncatedInt32ForToNumber ( interpreter::Register  reg,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1553 of file maglev-graph-builder.h.

1555  {
1557  allowed_input_type, conversion_type);
1558  }

◆ GetTruncatedInt32ForToNumber() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetTruncatedInt32ForToNumber ( ValueNode value,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 1683 of file maglev-graph-builder.cc.

1685  {
1687 
1688  ValueRepresentation representation =
1689  value->properties().value_representation();
1690  if (representation == ValueRepresentation::kInt32) return value;
1691  if (representation == ValueRepresentation::kUint32) {
1692  // This node is cheap (no code gen, just a bitcast), so don't cache it.
1693  return AddNewNode<TruncateUint32ToInt32>({value});
1694  }
1695 
1696  // Process constants first to avoid allocating NodeInfo for them.
1697  switch (value->opcode()) {
1698  case Opcode::kConstant: {
1699  compiler::ObjectRef object = value->Cast<Constant>()->object();
1700  if (!object.IsHeapNumber()) break;
1701  int32_t truncated_value = DoubleToInt32(object.AsHeapNumber().value());
1702  if (!Smi::IsValid(truncated_value)) break;
1703  return GetInt32Constant(truncated_value);
1704  }
1705  case Opcode::kSmiConstant:
1706  return GetInt32Constant(value->Cast<SmiConstant>()->value().value());
1707  case Opcode::kRootConstant: {
1708  Tagged<Object> root_object =
1709  local_isolate_->root(value->Cast<RootConstant>()->index());
1710  if (!IsOddball(root_object, local_isolate_)) break;
1711  int32_t truncated_value =
1712  DoubleToInt32(Cast<Oddball>(root_object)->to_number_raw());
1713  // All oddball ToNumber truncations are valid Smis.
1714  DCHECK(Smi::IsValid(truncated_value));
1715  return GetInt32Constant(truncated_value);
1716  }
1717  case Opcode::kFloat64Constant: {
1718  int32_t truncated_value =
1719  DoubleToInt32(value->Cast<Float64Constant>()->value().get_scalar());
1720  if (!Smi::IsValid(truncated_value)) break;
1721  return GetInt32Constant(truncated_value);
1722  }
1723 
1724  // We could emit unconditional eager deopts for other kinds of constant, but
1725  // it's not necessary, the appropriate checking conversion nodes will deopt.
1726  default:
1727  break;
1728  }
1729 
1730  NodeInfo* node_info = GetOrCreateInfoFor(value);
1731  auto& alternative = node_info->alternative();
1732 
1733  // If there is an int32_alternative, then that works as a truncated value
1734  // too.
1735  if (ValueNode* alt = alternative.int32()) {
1736  return alt;
1737  }
1738  if (ValueNode* alt = alternative.truncated_int32_to_number()) {
1739  return alt;
1740  }
1741 
1742  switch (representation) {
1744  NodeType old_type;
1745  EnsureType(value, allowed_input_type, &old_type);
1746  if (NodeTypeIsSmi(old_type)) {
1747  // Smi untagging can be cached as an int32 alternative, not just a
1748  // truncated alternative.
1749  return alternative.set_int32(BuildSmiUntag(value));
1750  }
1751  if (allowed_input_type == NodeType::kSmi) {
1752  return alternative.set_int32(AddNewNode<CheckedSmiUntag>({value}));
1753  }
1754  if (NodeTypeIs(old_type, allowed_input_type)) {
1755  return alternative.set_truncated_int32_to_number(
1756  AddNewNode<TruncateNumberOrOddballToInt32>({value},
1757  conversion_type));
1758  }
1759  return alternative.set_truncated_int32_to_number(
1760  AddNewNode<CheckedTruncateNumberOrOddballToInt32>({value},
1761  conversion_type));
1762  }
1764  // Ignore conversion_type for HoleyFloat64, and treat them like Float64.
1765  // ToNumber of undefined is anyway a NaN, so we'll simply truncate away
1766  // the NaN-ness of the hole, and don't need to do extra oddball checks so
1767  // we can ignore the hint (though we'll miss updating the feedback).
1769  return alternative.set_truncated_int32_to_number(
1770  AddNewNode<TruncateFloat64ToInt32>({value}));
1771  }
1772 
1774  // This is not an efficient implementation, but this only happens in
1775  // corner cases.
1776  ValueNode* value_to_number = AddNewNode<IntPtrToNumber>({value});
1777  return alternative.set_truncated_int32_to_number(
1778  AddNewNode<TruncateNumberOrOddballToInt32>(
1779  {value_to_number}, TaggedToFloat64ConversionType::kOnlyNumber));
1780  }
1783  UNREACHABLE();
1784  }
1785  UNREACHABLE();
1786 }
Tagged< Object > root(RootIndex index) const
ConstantMask::For< ConstantOp::Kind::kFloat64 > kFloat64Constant
Definition: opmasks.h:244
ConstantMask::For< ConstantOp::Kind::kSmi > kSmiConstant
Definition: opmasks.h:247
int32_t DoubleToInt32(double x)

References v8::internal::maglev::NodeInfo::alternative(), BuildSmiUntag(), v8::internal::DCHECK(), v8::internal::DoubleToInt32(), EnsureType(), v8::internal::Float64::get_scalar(), GetInt32Constant(), GetOrCreateInfoFor(), v8::internal::maglev::RootConstant::index(), v8::internal::Smi::IsValid(), v8::internal::compiler::anonymous_namespace{gap-resolver.cc}::kConstant, v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::maglev::kOnlyNumber, v8::internal::compiler::kSmi, v8::internal::maglev::kTagged, v8::internal::maglev::kTruncatedInt32, v8::internal::maglev::kUint32, local_isolate_, v8::internal::maglev::NodeTypeIs(), RecordUseReprHintIfPhi(), v8::internal::LocalIsolate::root(), v8::internal::UNREACHABLE(), v8::internal::maglev::Float64Constant::value(), v8::internal::maglev::SmiConstant::value(), v8::internal::value, and v8::internal::Tagged< Smi >::value().

Referenced by BuildTruncatingInt32BinaryOperationNodeForToNumber(), BuildTruncatingInt32BinarySmiOperationNodeForToNumber(), and BuildTruncatingInt32BitwiseNotForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetTrustedConstant()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetTrustedConstant ( compiler::HeapObjectRef  ref,
IndirectPointerTag  tag 
)
inline

Definition at line 349 of file maglev-graph-builder.h.

350  {
351  return graph()->GetTrustedConstant(ref, tag);
352  }
ValueNode * GetTrustedConstant(compiler::HeapObjectRef ref, IndirectPointerTag tag)
Definition: maglev-graph.h:478

References v8::internal::maglev::Graph::GetTrustedConstant(), and graph().

+ Here is the call graph for this function:

◆ GetTryCatchBlockForNonEagerInlining()

CatchBlockDetails v8::internal::maglev::MaglevGraphBuilder::GetTryCatchBlockForNonEagerInlining ( ExceptionHandlerInfo info)
inlineprivate

Definition at line 1276 of file maglev-graph-builder.h.

1277  {
1278  if (IsInsideTryBlock()) {
1279  return {info->catch_block_ref_address(), !info->ShouldLazyDeopt(), true,
1280  0};
1281  }
1282  if (!is_inline()) {
1283  return CatchBlockDetails{};
1284  }
1285  // Since this CatchBlockDetails is stored in a non-eager call site,
1286  // the catch block will already exist by the time inlining is attempted.
1287  CatchBlockDetails catch_details = caller_details_->catch_block;
1288  catch_details.block_already_exists = true;
1289  return catch_details;
1290  }

References v8::internal::maglev::CatchBlockDetails::block_already_exists, caller_details_, v8::internal::maglev::MaglevCallerDetails::catch_block, v8::internal::maglev::ExceptionHandlerInfo::catch_block_ref_address(), is_inline(), IsInsideTryBlock(), and v8::internal::maglev::ExceptionHandlerInfo::ShouldLazyDeopt().

+ Here is the call graph for this function:

◆ GetType()

NodeType v8::internal::maglev::MaglevGraphBuilder::GetType ( ValueNode node)
inlineprivate

Definition at line 542 of file maglev-graph-builder.h.

542  {
543  return known_node_aspects().GetType(broker(), node);
544  }
NodeType GetType(compiler::JSHeapBroker *broker, ValueNode *node) const

References broker(), v8::internal::maglev::KnownNodeAspects::GetType(), and known_node_aspects().

Referenced by BuildCheckHeapObject(), BuildCheckJSFunction(), BuildCheckJSReceiver(), BuildCheckJSReceiverOrNullOrUndefined(), BuildCheckMaps(), BuildCheckNumber(), BuildCheckSeqOneByteString(), BuildCheckSmi(), BuildCheckString(), BuildCheckStringOrOddball(), BuildCheckStringOrStringWrapper(), BuildCheckSymbol(), BuildLoadJSFunctionContext(), BuildLoadJSFunctionFeedbackCell(), BuildSmiUntag(), BuildUnwrapStringWrapper(), CanElideWriteBarrier(), GetInt32(), TryBuildNewConsString(), TryBuildStoreField(), and TrySpecializeStoreContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetUint32Constant()

Uint32Constant* v8::internal::maglev::MaglevGraphBuilder::GetUint32Constant ( int  constant)
inline

Definition at line 331 of file maglev-graph-builder.h.

331  {
332  return graph()->GetUint32Constant(constant);
333  }
Uint32Constant * GetUint32Constant(uint32_t constant)
Definition: maglev-graph.h:222

References v8::internal::maglev::Graph::GetUint32Constant(), and graph().

Referenced by GetUint32ElementIndex(), and TryFoldInt32BinaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ GetUint32ElementIndex() [1/2]

ReduceResult v8::internal::maglev::MaglevGraphBuilder::GetUint32ElementIndex ( interpreter::Register  reg)
inlineprivate

Definition at line 2457 of file maglev-graph-builder.h.

2457  {
2458  ValueNode* index_object = current_interpreter_frame_.get(reg);
2459  return GetUint32ElementIndex(index_object);
2460  }
ReduceResult GetUint32ElementIndex(interpreter::Register reg)

◆ GetUint32ElementIndex() [2/2]

ReduceResult v8::internal::maglev::MaglevGraphBuilder::GetUint32ElementIndex ( ValueNode index_object)
private

Definition at line 6087 of file maglev-graph-builder.cc.

6087  {
6088  // Don't record a Uint32 Phi use here, since the tagged path goes via
6089  // GetInt32ElementIndex, making this an Int32 Phi use.
6090 
6091  switch (object->properties().value_representation()) {
6093  return AddNewNode<CheckedIntPtrToUint32>({object});
6095  // TODO(victorgomes): Consider creating a CheckedObjectToUnsignedIndex.
6096  if (SmiConstant* constant = object->TryCast<SmiConstant>()) {
6097  int32_t value = constant->value().value();
6098  if (value < 0) {
6099  return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6100  }
6101  return GetUint32Constant(value);
6102  }
6103  return AddNewNode<CheckedInt32ToUint32>({GetInt32ElementIndex(object)});
6105  if (Int32Constant* constant = object->TryCast<Int32Constant>()) {
6106  int32_t value = constant->value();
6107  if (value < 0) {
6108  return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6109  }
6110  return GetUint32Constant(value);
6111  }
6112  return AddNewNode<CheckedInt32ToUint32>({object});
6114  return object;
6116  if (Float64Constant* constant = object->TryCast<Float64Constant>()) {
6117  double value = constant->value().get_scalar();
6118  uint32_t uint32_value;
6119  if (!DoubleToUint32IfEqualToSelf(value, &uint32_value)) {
6120  return EmitUnconditionalDeopt(DeoptimizeReason::kNotUint32);
6121  }
6122  if (Smi::IsValid(uint32_value)) {
6123  return GetUint32Constant(uint32_value);
6124  }
6125  }
6126  [[fallthrough]];
6128  // CheckedTruncateFloat64ToUint32 will gracefully deopt on holes.
6129  return AddNewNode<CheckedTruncateFloat64ToUint32>({object});
6130  }
6131  }
6132 }
Uint32Constant * GetUint32Constant(int constant)
bool DoubleToUint32IfEqualToSelf(double value, uint32_t *uint32_value)

References v8::internal::DoubleToUint32IfEqualToSelf(), EmitUnconditionalDeopt(), GetInt32ElementIndex(), GetUint32Constant(), v8::internal::Smi::IsValid(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, v8::internal::maglev::NodeBase::properties(), v8::internal::maglev::NodeBase::TryCast(), v8::internal::value, and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ GetUint8ClampedForToNumber() [1/2]

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetUint8ClampedForToNumber ( interpreter::Register  reg)
inlineprivate

Definition at line 1568 of file maglev-graph-builder.h.

1568  {
1570  }

◆ GetUint8ClampedForToNumber() [2/2]

ValueNode * v8::internal::maglev::MaglevGraphBuilder::GetUint8ClampedForToNumber ( ValueNode value)
private

Definition at line 2127 of file maglev-graph-builder.cc.

2127  {
2128  switch (value->properties().value_representation()) {
2130  // This is not an efficient implementation, but this only happens in
2131  // corner cases.
2132  return AddNewNode<CheckedNumberToUint8Clamped>(
2133  {AddNewNode<IntPtrToNumber>({value})});
2135  if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
2136  return GetInt32Constant(ClampToUint8(constant->value().value()));
2137  }
2138  NodeInfo* info = known_node_aspects().TryGetInfoFor(value);
2139  if (info && info->alternative().int32()) {
2140  return AddNewNode<Int32ToUint8Clamped>({info->alternative().int32()});
2141  }
2142  return AddNewNode<CheckedNumberToUint8Clamped>({value});
2143  }
2144  // HoleyFloat64 is treated like Float64. ToNumber of undefined is anyway a
2145  // NaN, so we'll simply truncate away the NaN-ness of the hole, and don't
2146  // need to do extra oddball checks (though we'll miss updating the
2147  // feedback).
2150  // TODO(leszeks): Handle Float64Constant, which requires the correct
2151  // rounding for clamping.
2152  return AddNewNode<Float64ToUint8Clamped>({value});
2154  if (Int32Constant* constant = value->TryCast<Int32Constant>()) {
2155  return GetInt32Constant(ClampToUint8(constant->value()));
2156  }
2157  return AddNewNode<Int32ToUint8Clamped>({value});
2159  return AddNewNode<Uint32ToUint8Clamped>({value});
2160  }
2161  UNREACHABLE();
2162 }

References v8::internal::maglev::NodeInfo::alternative(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::ClampToUint8(), GetInt32Constant(), v8::internal::maglev::kFloat64, v8::internal::maglev::kHoleyFloat64, v8::internal::maglev::kInt32, v8::internal::maglev::kIntPtr, known_node_aspects(), v8::internal::maglev::kTagged, v8::internal::maglev::kUint32, v8::internal::maglev::KnownNodeAspects::TryGetInfoFor(), v8::internal::UNREACHABLE(), and v8::internal::value.

+ Here is the call graph for this function:

◆ GetValueOrUndefined()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::GetValueOrUndefined ( ValueNode maybe_value)
inlineprivate

Definition at line 2037 of file maglev-graph-builder.h.

2037  {
2038  if (maybe_value == nullptr) {
2039  return GetRootConstant(RootIndex::kUndefinedValue);
2040  }
2041  return maybe_value;
2042  }

◆ graph()

Graph* v8::internal::maglev::MaglevGraphBuilder::graph ( ) const
inline

◆ graph_labeller()

MaglevGraphLabeller* v8::internal::maglev::MaglevGraphBuilder::graph_labeller ( ) const
inline

Definition at line 370 of file maglev-graph-builder.h.

370  {
372  return nullptr;
373  }
MaglevGraphLabeller * graph_labeller() const
Definition: maglev-graph.h:257

References graph_, v8::internal::maglev::Graph::graph_labeller(), and v8::internal::maglev::Graph::has_graph_labeller().

Referenced by AddInitializedNodeToGraph(), CreateNewConstantNode(), LoadAndCacheContextSlot(), v8::internal::maglev::MergePointInterpreterFrameState::MergeLoop(), v8::internal::maglev::MergePointInterpreterFrameState::MergePhis(), v8::internal::maglev::MergePointInterpreterFrameState::MergeThrow(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObject(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObjects(), PrintVirtualObjects(), ProcessMergePointAtExceptionHandlerStart(), v8::internal::maglev::MaglevInliner::RegisterNode(), RegisterPhisWithGraphLabeller(), StoreAndCacheContextSlot(), TryBuildStoreTaggedFieldToAllocation(), and v8::internal::maglev::MergePointInterpreterFrameState::TryMergeLoop().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ gvn_hash_value() [1/8]

static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const compiler::MapRef map)
inlinestaticprivate

Definition at line 3324 of file maglev-graph-builder.h.

3324  {
3325  return map.hash_value();
3326  }

◆ gvn_hash_value() [2/8]

static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const ExternalReference ref)
inlinestaticprivate

Definition at line 3336 of file maglev-graph-builder.h.

3336  {
3337  return base::hash_value(ref.address());
3338  }

References v8::internal::ExternalReference::address(), and v8::base::hash_value().

+ Here is the call graph for this function:

◆ gvn_hash_value() [3/8]

static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const interpreter::Register reg)
inlinestaticprivate

Definition at line 3328 of file maglev-graph-builder.h.

3328  {
3329  return base::hash_value(reg.index());
3330  }

References v8::base::hash_value(), and v8::internal::interpreter::Register::index().

+ Here is the call graph for this function:

◆ gvn_hash_value() [4/8]

static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const PolymorphicAccessInfo access_info)
inlinestaticprivate

Definition at line 3340 of file maglev-graph-builder.h.

3340  {
3341  return access_info.hash_value();
3342  }

References v8::internal::maglev::PolymorphicAccessInfo::hash_value().

+ Here is the call graph for this function:

◆ gvn_hash_value() [5/8]

static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const Representation rep)
inlinestaticprivate

Definition at line 3332 of file maglev-graph-builder.h.

3332  {
3333  return base::hash_value(rep.kind());
3334  }

References v8::base::hash_value(), and v8::internal::Representation::kind().

+ Here is the call graph for this function:

◆ gvn_hash_value() [6/8]

template<typename T >
static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const T in)
inlinestaticprivate

Definition at line 3320 of file maglev-graph-builder.h.

3320  {
3321  return base::hash_value(in);
3322  }

References v8::base::hash_value().

Referenced by AddNewNodeOrGetEquivalent().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ gvn_hash_value() [7/8]

template<typename T >
static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const v8::internal::ZoneCompactSet< T > &  vector)
inlinestaticprivate

Definition at line 3345 of file maglev-graph-builder.h.

3345  {
3346  size_t hash = base::hash_value(vector.size());
3347  for (auto e : vector) {
3348  hash = fast_hash_combine(hash, gvn_hash_value(e));
3349  }
3350  return hash;
3351  }

References v8::internal::compiler::turboshaft::fast_hash_combine(), v8::base::hash_value(), and v8::internal::ZoneCompactSet< T >::size().

+ Here is the call graph for this function:

◆ gvn_hash_value() [8/8]

template<typename T >
static size_t v8::internal::maglev::MaglevGraphBuilder::gvn_hash_value ( const v8::internal::ZoneVector< T > &  vector)
inlinestaticprivate

Definition at line 3354 of file maglev-graph-builder.h.

3354  {
3355  size_t hash = base::hash_value(vector.size());
3356  for (auto e : vector) {
3357  hash = fast_hash_combine(hash, gvn_hash_value(e));
3358  }
3359  return hash;
3360  }

References v8::internal::compiler::turboshaft::fast_hash_combine(), v8::base::hash_value(), and v8::internal::ZoneVector< T >::size().

+ Here is the call graph for this function:

◆ has_graph_labeller()

bool v8::internal::maglev::MaglevGraphBuilder::has_graph_labeller ( ) const
inline

Definition at line 369 of file maglev-graph-builder.h.

369 { return graph_->has_graph_labeller(); }

References graph_, and v8::internal::maglev::Graph::has_graph_labeller().

Referenced by AddInitializedNodeToGraph(), CreateNewConstantNode(), ProcessMergePointAtExceptionHandlerStart(), v8::internal::maglev::MaglevInliner::RegisterNode(), and RegisterPhisWithGraphLabeller().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ HasDisjointType()

bool v8::internal::maglev::MaglevGraphBuilder::HasDisjointType ( ValueNode lhs,
NodeType  rhs_type 
)
inlineprivate

Definition at line 553 of file maglev-graph-builder.h.

553  {
554  return known_node_aspects().HasDisjointType(broker(), lhs, rhs_type);
555  }
bool HasDisjointType(compiler::JSHeapBroker *broker, ValueNode *lhs, NodeType rhs_type)

References broker(), v8::internal::maglev::KnownNodeAspects::HasDisjointType(), and known_node_aspects().

+ Here is the call graph for this function:

◆ HasValidInitialMap()

bool v8::internal::maglev::MaglevGraphBuilder::HasValidInitialMap ( compiler::JSFunctionRef  new_target,
compiler::JSFunctionRef  constructor 
)
private

Definition at line 8209 of file maglev-graph-builder.cc.

8210  {
8211  if (!new_target.map(broker()).has_prototype_slot()) return false;
8212  if (!new_target.has_initial_map(broker())) return false;
8213  compiler::MapRef initial_map = new_target.initial_map(broker());
8214  return initial_map.GetConstructor(broker()).equals(constructor);
8215 }

References broker(), v8::internal::compiler::JSFunctionRef::has_initial_map(), v8::internal::has_prototype_slot, v8::internal::compiler::initial_map, v8::internal::compiler::JSFunctionRef::initial_map(), and v8::internal::compiler::HeapObjectRef::map().

+ Here is the call graph for this function:

◆ HaveDisjointTypes()

bool v8::internal::maglev::MaglevGraphBuilder::HaveDisjointTypes ( ValueNode lhs,
ValueNode rhs 
)
inlineprivate

Definition at line 550 of file maglev-graph-builder.h.

550  {
551  return known_node_aspects().HaveDisjointTypes(broker(), lhs, rhs);
552  }
bool HaveDisjointTypes(compiler::JSHeapBroker *broker, ValueNode *lhs, ValueNode *rhs)

References broker(), v8::internal::maglev::KnownNodeAspects::HaveDisjointTypes(), and known_node_aspects().

Referenced by BuildTaggedEqual().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ in_optimistic_peeling_iteration()

bool v8::internal::maglev::MaglevGraphBuilder::in_optimistic_peeling_iteration ( ) const
inlineprivate

Definition at line 3235 of file maglev-graph-builder.h.

3235  {
3236  return v8_flags.maglev_optimistic_peeled_loops &&
3238  }

References v8::internal::v8_flags.

Referenced by MarkBytecodeDead(), and VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ in_peeled_iteration()

bool v8::internal::maglev::MaglevGraphBuilder::in_peeled_iteration ( ) const
inlineprivate

Definition at line 3226 of file maglev-graph-builder.h.

3226  {
3228  return peeled_iteration_count_ > 0;
3229  }

References DCHECK_GE.

Referenced by MarkBytecodeDead().

+ Here is the caller graph for this function:

◆ InferHasInPrototypeChain()

MaglevGraphBuilder::InferHasInPrototypeChainResult v8::internal::maglev::MaglevGraphBuilder::InferHasInPrototypeChain ( ValueNode receiver,
compiler::HeapObjectRef  prototype 
)
private

Definition at line 12576 of file maglev-graph-builder.cc.

12577  {
12578  auto node_info = known_node_aspects().TryGetInfoFor(receiver);
12579  // If the map set is not found, then we don't know anything about the map of
12580  // the receiver, so bail.
12581  if (!node_info || !node_info->possible_maps_are_known()) {
12582  return kMayBeInPrototypeChain;
12583  }
12584 
12585  // If the set of possible maps is empty, then there's no possible map for this
12586  // receiver, therefore this path is unreachable at runtime. We're unlikely to
12587  // ever hit this case, BuildCheckMaps should already unconditionally deopt,
12588  // but check it in case another checking operation fails to statically
12589  // unconditionally deopt.
12590  if (node_info->possible_maps().is_empty()) {
12591  // TODO(leszeks): Add an unreachable assert here.
12592  return kIsNotInPrototypeChain;
12593  }
12594 
12595  ZoneVector<compiler::MapRef> receiver_map_refs(zone());
12596 
12597  // Try to determine either that all of the {receiver_maps} have the given
12598  // {prototype} in their chain, or that none do. If we can't tell, return
12599  // kMayBeInPrototypeChain.
12600  bool all = true;
12601  bool none = true;
12602  for (compiler::MapRef map : node_info->possible_maps()) {
12603  receiver_map_refs.push_back(map);
12604  while (true) {
12605  if (IsSpecialReceiverInstanceType(map.instance_type())) {
12606  return kMayBeInPrototypeChain;
12607  }
12608  if (!map.IsJSObjectMap()) {
12609  all = false;
12610  break;
12611  }
12612  compiler::HeapObjectRef map_prototype = map.prototype(broker());
12613  if (map_prototype.equals(prototype)) {
12614  none = false;
12615  break;
12616  }
12617  map = map_prototype.map(broker());
12618  // TODO(v8:11457) Support dictionary mode prototypes here.
12619  if (!map.is_stable() || map.is_dictionary_map()) {
12620  return kMayBeInPrototypeChain;
12621  }
12622  if (map.oddball_type(broker()) == compiler::OddballType::kNull) {
12623  all = false;
12624  break;
12625  }
12626  }
12627  }
12628  DCHECK(!receiver_map_refs.empty());
12629  DCHECK_IMPLIES(all, !none);
12630  if (!all && !none) return kMayBeInPrototypeChain;
12631 
12632  {
12633  compiler::OptionalJSObjectRef last_prototype;
12634  if (all) {
12635  // We don't need to protect the full chain if we found the prototype, we
12636  // can stop at {prototype}. In fact we could stop at the one before
12637  // {prototype} but since we're dealing with multiple receiver maps this
12638  // might be a different object each time, so it's much simpler to include
12639  // {prototype}. That does, however, mean that we must check {prototype}'s
12640  // map stability.
12641  if (!prototype.IsJSObject() || !prototype.map(broker()).is_stable()) {
12642  return kMayBeInPrototypeChain;
12643  }
12644  last_prototype = prototype.AsJSObject();
12645  }
12647  receiver_map_refs, kStartAtPrototype, last_prototype);
12648  }
12649 
12650  DCHECK_EQ(all, !none);
12652 }
void DependOnStablePrototypeChains(ZoneVector< MapRef > const &receiver_maps, WhereToStart start, OptionalJSObjectRef last_prototype=OptionalJSObjectRef())
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enables Turboshaft s StaticAssert and CheckTurboshaftTypeOf operations Wasm code into JS functions via the JS to Wasm wrappers are still inlined in TurboFan For controlling whether to at see turbo inline js wasm calls enable Turboshaft s loop unrolling enable an additional Turboshaft phase that performs optimizations based on type information enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps trace Turboshaft s if else to switch reducer invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the preconfigured old space Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking 
schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats TracingFlags::gc_stats track native contexts that are expected to be garbage collected verify heap pointers before and after GC memory reducer runs GC with ReduceMemoryFootprint flag Maximum number of memory reducer GCs scheduled Old gen GC speed is computed directly from gc tracer counters Perform compaction on full GCs based on V8 s default heuristics Perform compaction on every full GC Perform code space compaction when finalizing a full GC with stack Stress GC compaction to flush out bugs with moving objects flush of baseline code when it has not been executed recently Use time base code flushing instead of age Use a progress bar to scan large objects in increments when incremental marking is active force incremental marking for small heaps and run it more often Release pooled large pages after X seconds prints number of allocations and enables analysis mode for gc fuzz e g stress stress scavenge force scavenge at random points between and reclaim otherwise unreachable unmodified wrapper objects when possible discard the memory pool before invoking the GC on memory pressure or last resort GCs Delay before memory reducer start virtual randomize memory reservations by ignoring any hints passed when allocating pages use incremental marking for CppHeap cppheap_concurrent_marking c value for membalancer A special constant to balance between memory and space tradeoff The smaller the more memory it uses enable use of SSE4 instructions if available enable use of SAHF instruction if enable use of AVX VNNI instructions if 
available enable use of POPCNT instruction if available force all emitted branches to be in long enable use of partial constant none
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enables Turboshaft s StaticAssert and CheckTurboshaftTypeOf operations Wasm code into JS functions via the JS to Wasm wrappers are still inlined in TurboFan For controlling whether to at all
bool IsSpecialReceiverInstanceType(InstanceType instance_type)
Definition: objects-inl.h:1654
@ kStartAtPrototype
Definition: globals.h:1729

References all, broker(), v8::internal::DCHECK(), DCHECK_EQ, DCHECK_IMPLIES, v8::internal::ZoneVector< T >::empty(), v8::internal::IsSpecialReceiverInstanceType(), v8::internal::compiler::kNull, v8::internal::kStartAtPrototype, v8::internal::compiler::HeapObjectRef::map(), none, v8::internal::prototype, and v8::internal::ZoneVector< T >::push_back().

+ Here is the call graph for this function:

◆ InitializePredecessorCount()

void v8::internal::maglev::MaglevGraphBuilder::InitializePredecessorCount ( uint32_t  offset,
int  amount 
)
inlineprivate

Definition at line 3198 of file maglev-graph-builder.h.

3198  {
3199  DCHECK_LE(offset, bytecode().length());
3200  DCHECK_NULL(merge_states_[offset]);
3201  predecessor_count_[offset] = amount;
3202  }

References DCHECK_LE, DCHECK_NULL, and v8::internal::length.

◆ InitializeRegister()

void v8::internal::maglev::MaglevGraphBuilder::InitializeRegister ( interpreter::Register  reg,
ValueNode value 
)

Definition at line 1081 of file maglev-graph-builder.cc.

1082  {
1084  reg, value ? value : AddNewNode<InitialValue>({}, reg));
1085 }

References current_interpreter_frame_, v8::internal::maglev::InterpreterFrameState::set(), and v8::internal::value.

Referenced by BuildRegisterFrameInitialization().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ inline_exit_offset()

int v8::internal::maglev::MaglevGraphBuilder::inline_exit_offset ( ) const
inlineprivate

Definition at line 3163 of file maglev-graph-builder.h.

3163  {
3164  DCHECK(is_inline());
3165  return bytecode().length();
3166  }

References DCHECK.

Referenced by KillPeeledLoopTargets(), MaglevGraphBuilder(), and MarkBytecodeDead().

+ Here is the caller graph for this function:

◆ inlining_depth()

int v8::internal::maglev::MaglevGraphBuilder::inlining_depth ( ) const
inline

Definition at line 378 of file maglev-graph-builder.h.

378 { return compilation_unit_->inlining_depth(); }

References compilation_unit_, and v8::internal::maglev::MaglevCompilationUnit::inlining_depth().

+ Here is the call graph for this function:

◆ is_eager_inline()

bool v8::internal::maglev::MaglevGraphBuilder::is_eager_inline ( ) const
inline

Definition at line 380 of file maglev-graph-builder.h.

380  {
381  DCHECK(is_inline());
383  v8_flags.maglev_non_eager_inlining ||
384  v8_flags.turbolev_non_eager_inlining);
386  }

References caller_details_, v8::internal::DCHECK(), DCHECK_IMPLIES, v8::internal::maglev::MaglevCallerDetails::is_eager_inline, is_inline(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ is_inline()

bool v8::internal::maglev::MaglevGraphBuilder::is_inline ( ) const
inline

◆ is_loop_effect_tracking()

bool v8::internal::maglev::MaglevGraphBuilder::is_loop_effect_tracking ( )
inlineprivate

Definition at line 3242 of file maglev-graph-builder.h.

3242 { return loop_effects_; }

Referenced by CanTrackObjectChanges(), and StoreAndCacheContextSlot().

+ Here is the caller graph for this function:

◆ is_loop_effect_tracking_enabled()

bool v8::internal::maglev::MaglevGraphBuilder::is_loop_effect_tracking_enabled ( )
inlineprivate

Definition at line 3239 of file maglev-graph-builder.h.

3239  {
3240  return v8_flags.maglev_escape_analysis || v8_flags.maglev_licm;
3241  }

References v8::internal::v8_flags.

Referenced by VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ is_non_eager_inlining_enabled()

bool v8::internal::maglev::MaglevGraphBuilder::is_non_eager_inlining_enabled ( ) const
inline

Definition at line 419 of file maglev-graph-builder.h.

419  {
420  if (is_turbolev()) {
421  return v8_flags.turbolev_non_eager_inlining;
422  }
423  return v8_flags.maglev_non_eager_inlining;
424  }

References is_turbolev(), and v8::internal::v8_flags.

Referenced by AttachExceptionHandlerInfo().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ is_turbolev()

bool v8::internal::maglev::MaglevGraphBuilder::is_turbolev ( ) const
inline

Definition at line 417 of file maglev-graph-builder.h.

417 { return is_turbolev_; }

References is_turbolev_.

Referenced by is_non_eager_inlining_enabled(), max_inline_depth(), max_inlined_bytecode_size(), max_inlined_bytecode_size_cumulative(), max_inlined_bytecode_size_small(), min_inlining_frequency(), ShouldEmitInterruptBudgetChecks(), and TryBuildNewConsString().

+ Here is the caller graph for this function:

◆ IsInsideLoop()

bool v8::internal::maglev::MaglevGraphBuilder::IsInsideLoop ( ) const
inlineprivate

Definition at line 3145 of file maglev-graph-builder.h.

3145  {
3146  if (is_inline() && caller_details()->is_inside_loop) return true;
3147  int loop_header_offset =
3149  if (loop_header_offset != -1) {
3150  const compiler::LoopInfo& loop_info =
3151  bytecode_analysis().GetLoopInfoFor(loop_header_offset);
3152  if (loop_info.parent_offset() == -1) {
 3153  // This is the outermost loop; if we're actually inside the peel, we are
 3154  // not really in a loop.
3156  }
3157  return true;
3158  }
3159  return false;
3160  }

References v8::internal::compiler::LoopInfo::parent_offset().

Referenced by CanTrackObjectChanges().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ IsInsideTryBlock()

bool v8::internal::maglev::MaglevGraphBuilder::IsInsideTryBlock ( ) const
inlineprivate

Definition at line 1256 of file maglev-graph-builder.h.

1256 { return catch_block_stack_.size() > 0; }

References catch_block_stack_.

Referenced by AttachExceptionHandlerInfo(), CanTrackObjectChanges(), GetCatchBlockFrameState(), GetCurrentTryCatchBlock(), GetTryCatchBlockForNonEagerInlining(), and VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ IsOffsetAMergePoint()

bool v8::internal::maglev::MaglevGraphBuilder::IsOffsetAMergePoint ( int  offset)
inlineprivate

Definition at line 720 of file maglev-graph-builder.h.

720  {
721  return merge_states_[offset] != nullptr;
722  }

References merge_states_.

◆ IsRegisterEqualToAccumulator()

bool v8::internal::maglev::MaglevGraphBuilder::IsRegisterEqualToAccumulator ( int  operand_index)
inlineprivate

Definition at line 1699 of file maglev-graph-builder.h.

1699  {
1700  interpreter::Register source = iterator_.GetRegisterOperand(operand_index);
1701  return current_interpreter_frame_.get(source) ==
1703  }

Referenced by BuildTruncatingInt32BinaryOperationNodeForToNumber(), and VisitCompareOperation().

+ Here is the caller graph for this function:

◆ KillPeeledLoopTargets()

void v8::internal::maglev::MaglevGraphBuilder::KillPeeledLoopTargets ( int  peelings)
inlineprivate

Definition at line 736 of file maglev-graph-builder.h.

736  {
737  DCHECK_EQ(iterator_.current_bytecode(), interpreter::Bytecode::kJumpLoop);
738  int target = iterator_.GetJumpTargetOffset();
 739  // Since we ended up not peeling, we must kill all the doubly-accounted
 740  // jumps out of the loop.
741  interpreter::BytecodeArrayIterator iterator(bytecode().object());
742  for (iterator.SetOffset(target);
743  iterator.current_offset() < iterator_.current_offset();
744  iterator.Advance()) {
745  interpreter::Bytecode bc = iterator.current_bytecode();
746  DCHECK_NE(bc, interpreter::Bytecode::kJumpLoop);
747  int kill = -1;
749  iterator.GetJumpTargetOffset() > iterator_.current_offset()) {
750  kill = iterator.GetJumpTargetOffset();
751  } else if (is_inline() && interpreter::Bytecodes::Returns(bc)) {
752  kill = inline_exit_offset();
753  }
754  if (kill != -1) {
755  if (merge_states_[kill]) {
756  for (int i = 0; i < peelings; ++i) {
758  }
759  }
760  UpdatePredecessorCount(kill, -peelings);
761  }
762  }
763  }
void MergeDead(const MaglevCompilationUnit &compilation_unit, unsigned num=1)

References v8::internal::interpreter::BytecodeArrayIterator::Advance(), bytecode(), compilation_unit_, v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), v8::internal::interpreter::BytecodeArrayIterator::current_offset(), DCHECK_EQ, DCHECK_NE, v8::internal::interpreter::BytecodeArrayIterator::GetJumpTargetOffset(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, inline_exit_offset(), is_inline(), v8::internal::interpreter::Bytecodes::IsJump(), iterator_, merge_states_, v8::internal::maglev::MergePointInterpreterFrameState::MergeDead(), v8::internal::interpreter::Bytecodes::Returns(), v8::internal::interpreter::BytecodeArrayIterator::SetOffset(), and UpdatePredecessorCount().

+ Here is the call graph for this function:

◆ known_node_aspects()

KnownNodeAspects& v8::internal::maglev::MaglevGraphBuilder::known_node_aspects ( )
inlineprivate

◆ LoadAndCacheContextSlot()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::LoadAndCacheContextSlot ( ValueNode context,
int  offset,
ContextSlotMutability  slot_mutability,
ContextMode  context_mode 
)
private

Definition at line 3565 of file maglev-graph-builder.cc.

3567  {
3568  int offset = Context::OffsetOfElementAt(index);
3569  ValueNode*& cached_value =
3570  slot_mutability == kMutable
3571  ? known_node_aspects().loaded_context_slots[{context, offset}]
3572  : known_node_aspects().loaded_context_constants[{context, offset}];
3573  if (cached_value) {
3574  if (v8_flags.trace_maglev_graph_building) {
3575  std::cout << " * Reusing cached context slot "
3576  << PrintNodeLabel(graph_labeller(), context) << "[" << offset
3577  << "]: " << PrintNode(graph_labeller(), cached_value)
3578  << std::endl;
3579  }
3580  return cached_value;
3581  }
3582  if (slot_mutability == kMutable &&
3583  !known_node_aspects().loaded_context_slots.empty()) {
3585  broker(), local_isolate(), context);
3586  }
3587  if (context_mode == ContextMode::kHasContextCells &&
3588  (v8_flags.script_context_cells || v8_flags.function_context_cells) &&
3589  slot_mutability == kMutable) {
3590  // We collect feedback only for mutable context slots.
3591  cached_value = TrySpecializeLoadContextSlot(context, index);
3592  if (cached_value) return cached_value;
3593  return cached_value = BuildLoadTaggedField<LoadTaggedFieldForContextSlot>(
3594  context, offset);
3595  }
3596  return cached_value =
3597  BuildLoadTaggedField<LoadTaggedFieldForContextSlotNoCells>(context,
3598  offset);
3599 }
ValueNode * TrySpecializeLoadContextSlot(ValueNode *context, int index)
void UpdateMayHaveAliasingContexts(compiler::JSHeapBroker *broker, LocalIsolate *local_isolate, ValueNode *context)
ZoneMap< std::tuple< ValueNode *, int >, ValueNode * > loaded_context_constants

References broker(), graph_labeller(), v8::internal::index, v8::internal::kHasContextCells, kMutable, known_node_aspects(), v8::internal::maglev::KnownNodeAspects::loaded_context_constants, v8::internal::maglev::KnownNodeAspects::loaded_context_slots, local_isolate(), v8::internal::Context::OffsetOfElementAt(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), TrySpecializeLoadContextSlot(), v8::internal::maglev::KnownNodeAspects::UpdateMayHaveAliasingContexts(), and v8::internal::v8_flags.

Referenced by BuildLoadContextSlot(), CheckContextExtensions(), and TryBuildScriptContextLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ LoadRegister()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::LoadRegister ( int  operand_index)
inlineprivate

Definition at line 1705 of file maglev-graph-builder.h.

1705  {
1707  iterator_.GetRegisterOperand(operand_index));
1708  }

Referenced by BuildGenericBinaryOperationNode(), BuildInt32BinaryOperationNode(), TryReduceCompareEqualAgainstConstant(), VisitBinaryOperation(), and VisitCompareOperation().

+ Here is the caller graph for this function:

◆ LoadRegisterHoleyFloat64ForToNumber()

ValueNode* v8::internal::maglev::MaglevGraphBuilder::LoadRegisterHoleyFloat64ForToNumber ( int  operand_index,
NodeType  allowed_input_type,
TaggedToFloat64ConversionType  conversion_type 
)
inlineprivate

Definition at line 1710 of file maglev-graph-builder.h.

1712  {
1714  iterator_.GetRegisterOperand(operand_index), allowed_input_type,
1715  conversion_type);
1716  }

Referenced by BuildFloat64BinaryOperationNodeForToNumber().

+ Here is the caller graph for this function:

◆ local_isolate()

LocalIsolate* v8::internal::maglev::MaglevGraphBuilder::local_isolate ( ) const
inline

Definition at line 367 of file maglev-graph-builder.h.

367 { return local_isolate_; }

References local_isolate_.

Referenced by LoadAndCacheContextSlot(), and StoreAndCacheContextSlot().

+ Here is the caller graph for this function:

◆ MaglevIsTopTier()

bool v8::internal::maglev::MaglevGraphBuilder::MaglevIsTopTier ( ) const
inlineprivate

Definition at line 591 of file maglev-graph-builder.h.

591 { return !v8_flags.turbofan && v8_flags.maglev; }

References v8::internal::v8_flags.

◆ MarkBranchDeadAndJumpIfNeeded()

void v8::internal::maglev::MaglevGraphBuilder::MarkBranchDeadAndJumpIfNeeded ( bool  is_jump_taken)
private

Definition at line 14648 of file maglev-graph-builder.cc.

14648  {
14649  int jump_offset = iterator_.GetJumpTargetOffset();
14650  if (is_jump_taken) {
14651  BasicBlock* block = FinishBlock<Jump>({}, &jump_targets_[jump_offset]);
14653  MergeIntoFrameState(block, jump_offset);
14654  } else {
14655  MergeDeadIntoFrameState(jump_offset);
14656  }
14657 }

◆ MarkBytecodeDead()

void v8::internal::maglev::MaglevGraphBuilder::MarkBytecodeDead ( )
inlineprivate

Definition at line 765 of file maglev-graph-builder.h.

765  {
767  if (v8_flags.trace_maglev_graph_building) {
768  std::cout << "== Dead ==\n"
769  << std::setw(4) << iterator_.current_offset() << " : ";
772  std::cout << std::endl;
773  }
774 
775  // If the current bytecode is a jump to elsewhere, then this jump is
776  // also dead and we should make sure to merge it as a dead predecessor.
779  // Jumps merge into their target, and conditional jumps also merge into
780  // the fallthrough.
784  }
785  } else if (bytecode == interpreter::Bytecode::kJumpLoop) {
786  // JumpLoop merges into its loop header, which has to be treated
787  // specially by the merge.
790  }
792  // Switches merge into their targets, and into the fallthrough.
793  for (auto offset : iterator_.GetJumpTableTargetOffsets()) {
794  MergeDeadIntoFrameState(offset.target_offset);
795  }
799  // Any other bytecode that doesn't return or throw will merge into the
800  // fallthrough.
804  }
805 
806  // TODO(leszeks): We could now continue iterating the bytecode
807  }
static std::ostream & Decode(std::ostream &os, const uint8_t *bytecode_start, bool with_hex=true)
static constexpr bool IsForwardJump(Bytecode bytecode)
Definition: bytecodes.h:816

References bytecode(), v8::internal::interpreter::BytecodeArrayIterator::current_address(), current_block_, v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), v8::internal::interpreter::BytecodeArrayIterator::current_offset(), DCHECK_NULL, v8::internal::interpreter::BytecodeDecoder::Decode(), v8::internal::interpreter::BytecodeArrayIterator::GetJumpTableTargetOffsets(), v8::internal::interpreter::BytecodeArrayIterator::GetJumpTargetOffset(), in_optimistic_peeling_iteration(), in_peeled_iteration(), inline_exit_offset(), is_inline(), v8::internal::interpreter::Bytecodes::IsConditionalJump(), v8::internal::interpreter::Bytecodes::IsForwardJump(), v8::internal::interpreter::Bytecodes::IsSwitch(), iterator_, MergeDeadIntoFrameState(), MergeDeadLoopIntoFrameState(), v8::internal::interpreter::BytecodeArrayIterator::next_offset(), v8::internal::interpreter::Bytecodes::Returns(), v8::internal::interpreter::Bytecodes::UnconditionallyThrows(), and v8::internal::v8_flags.

Referenced by VisitSingleBytecode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ MarkNodeDead()

void v8::internal::maglev::MaglevGraphBuilder::MarkNodeDead ( Node node)
inlineprivate

Definition at line 3373 of file maglev-graph-builder.h.

3373  {
3374  for (int i = 0; i < node->input_count(); ++i) {
3375  node->input(i).clear();
3376  }
3377  node->OverwriteWith(Opcode::kDead);
3378  }

References v8::internal::maglev::Input::clear(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::NodeBase::input(), v8::internal::maglev::NodeBase::input_count(), and v8::internal::maglev::NodeBase::OverwriteWith().

Referenced by StoreAndCacheContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ MarkPossibleSideEffect()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::MarkPossibleSideEffect ( NodeT node)
inlineprivate

Definition at line 1821 of file maglev-graph-builder.h.

1821  {
1822  if constexpr (NodeT::kProperties.can_read() ||
1823  NodeT::kProperties.can_deopt() ||
1824  NodeT::kProperties.can_throw()) {
1826  }
1827 
1828  if constexpr (Node::opcode_of<NodeT> != Opcode::kAllocationBlock &&
1829  (NodeT::kProperties.can_deopt() ||
1830  NodeT::kProperties.can_throw() ||
1831  NodeT::kProperties.can_allocate())) {
1833  }
1834 
1835  // Don't do anything for nodes without side effects.
1836  if constexpr (!NodeT::kProperties.can_write()) return;
1837 
1838  if (v8_flags.maglev_cse) {
1840  }
1841 
 1842  // We only need to clear unstable node aspects on the current builder, not
 1843  // the parent, since we'll copy the known_node_aspects to the parent
 1844  // anyway once we finish the inlined function.
1845 
1846  if constexpr (IsElementsArrayWrite(Node::opcode_of<NodeT>)) {
1847  node->ClearElementsProperties(known_node_aspects());
1848  if (is_loop_effect_tracking()) {
1849  loop_effects_->keys_cleared.insert(
1851  }
1852  } else if constexpr (!IsSimpleFieldStore(Node::opcode_of<NodeT>) &&
1853  !IsTypedArrayStore(Node::opcode_of<NodeT>)) {
1854  // Don't change known node aspects for simple field stores. The only
1855  // relevant side effect on these is writes to objects which invalidate
1856  // loaded properties and context slots, and we invalidate these already as
1857  // part of emitting the store.
1858  node->ClearUnstableNodeAspects(known_node_aspects());
1859  if (is_loop_effect_tracking()) {
1861  }
1862  }
1863 
1864  // Simple field stores can't possibly change or migrate the map.
1865  static constexpr bool is_possible_map_change =
1866  !IsSimpleFieldStore(Node::opcode_of<NodeT>);
1867 
1868  // All user-observable side effects need to clear state that is cached on
1869  // the builder. This reset has to be propagated up through the parents.
1870  // TODO(leszeks): What side effects aren't observable? Maybe migrations?
1871  ResetBuilderCachedState<is_possible_map_change>();
1872  }
constexpr bool IsSimpleFieldStore(Opcode opcode)
Definition: maglev-ir.h:566
constexpr bool IsTypedArrayStore(Opcode opcode)
Definition: maglev-ir.h:581
constexpr bool IsElementsArrayWrite(Opcode opcode)
Definition: maglev-ir.h:577
ZoneSet< KnownNodeAspects::LoadedPropertyMapKey > keys_cleared

References v8::internal::maglev::IsElementsArrayWrite(), v8::internal::maglev::IsSimpleFieldStore(), v8::internal::maglev::IsTypedArrayStore(), and v8::internal::v8_flags.

Referenced by AttachExtraInfoAndAddToGraph().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ max_inline_depth()

int v8::internal::maglev::MaglevGraphBuilder::max_inline_depth ( )
inline

Definition at line 456 of file maglev-graph-builder.h.

456  {
457  if (is_turbolev()) {
458  // This is just to avoid some corner cases, especially since we allow
459  // recursive inlining.
460  constexpr int kMaxDepthForInlining = 50;
461  return kMaxDepthForInlining;
462  } else {
463  return v8_flags.max_maglev_inline_depth;
464  }
465  }

References is_turbolev(), v8::internal::compiler::anonymous_namespace{js-inlining.cc}::kMaxDepthForInlining, and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ max_inlined_bytecode_size()

int v8::internal::maglev::MaglevGraphBuilder::max_inlined_bytecode_size ( )
inline

Definition at line 428 of file maglev-graph-builder.h.

428  {
429  if (is_turbolev()) {
430  return v8_flags.max_inlined_bytecode_size;
431  } else {
432  return v8_flags.max_maglev_inlined_bytecode_size;
433  }
434  }

References is_turbolev(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ max_inlined_bytecode_size_cumulative()

int v8::internal::maglev::MaglevGraphBuilder::max_inlined_bytecode_size_cumulative ( )
inline

Definition at line 449 of file maglev-graph-builder.h.

449  {
450  if (is_turbolev()) {
451  return v8_flags.max_inlined_bytecode_size_cumulative;
452  } else {
453  return v8_flags.max_maglev_inlined_bytecode_size_cumulative;
454  }
455  }

References is_turbolev(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ max_inlined_bytecode_size_small()

int v8::internal::maglev::MaglevGraphBuilder::max_inlined_bytecode_size_small ( )
inline

Definition at line 435 of file maglev-graph-builder.h.

435  {
436  if (is_turbolev()) {
437  return v8_flags.max_inlined_bytecode_size_small;
438  } else {
439  return v8_flags.max_maglev_inlined_bytecode_size_small;
440  }
441  }

References is_turbolev(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ MergeDeadIntoFrameState()

void v8::internal::maglev::MaglevGraphBuilder::MergeDeadIntoFrameState ( int  target)
private

Definition at line 14566 of file maglev-graph-builder.cc.

14566  {
14567  // If there already is a frame state, merge.
14568  if (merge_states_[target]) {
14570  predecessor_count(target));
14572  // If this merge is the last one which kills a loop merge, remove that
14573  // merge state.
14574  if (merge_states_[target]->is_unmerged_unreachable_loop()) {
14575  if (v8_flags.trace_maglev_graph_building) {
14576  std::cout << "! Killing loop merge state at @" << target << std::endl;
14577  }
14578  merge_states_[target] = nullptr;
14579  }
14580  }
14581  // If there is no merge state yet, don't create one, but just reduce the
14582  // number of possible predecessors to zero.
14584 }
void DecrementDeadPredecessorAndAccountForPeeling(uint32_t offset)

References DCHECK_EQ, and v8::internal::v8_flags.

Referenced by MarkBytecodeDead().

+ Here is the caller graph for this function:

◆ MergeDeadLoopIntoFrameState()

void v8::internal::maglev::MaglevGraphBuilder::MergeDeadLoopIntoFrameState ( int  target)
private

Definition at line 14586 of file maglev-graph-builder.cc.

14586  {
14587  // Check if the Loop entry is dead already (e.g. an outer loop from OSR).
14588  if (V8_UNLIKELY(!merge_states_[target]) && predecessor_count(target) == 0) {
14589  static_assert(kLoopsMustBeEnteredThroughHeader);
14590  return;
14591  }
14592  // If there already is a frame state, merge.
14593  if (V8_LIKELY(merge_states_[target])) {
14595  predecessor_count(target));
14597  !merge_states_[target]->is_unmerged_unreachable_loop()) {
14598  EndLoopEffects(target);
14599  }
14601  }
14602  // If there is no merge state yet, don't create one, but just reduce the
14603  // number of possible predecessors to zero.
14605 }
void MergeDeadLoop(const MaglevCompilationUnit &compilation_unit)

References DCHECK_EQ, V8_LIKELY, and V8_UNLIKELY.

Referenced by MarkBytecodeDead().

+ Here is the caller graph for this function:

◆ MergeIntoFrameState()

void v8::internal::maglev::MaglevGraphBuilder::MergeIntoFrameState ( BasicBlock block,
int  target 
)
private

Definition at line 14545 of file maglev-graph-builder.cc.

14546  {
14547  if (merge_states_[target] == nullptr) {
14548  bool jumping_to_peeled_iteration = bytecode_analysis().IsLoopHeader(target);
14549  DCHECK_EQ(jumping_to_peeled_iteration,
14551  const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(target);
14552  if (jumping_to_peeled_iteration) {
14553  // The peeled iteration is missing the backedge.
14555  }
14556  // If there's no target frame state, allocate a new one.
14559  predecessor_count(target), predecessor, liveness);
14560  } else {
14561  // If there already is a frame state, merge.
14562  merge_states_[target]->Merge(this, current_interpreter_frame_, predecessor);
14563  }
14564 }
static MergePointInterpreterFrameState * New(const MaglevCompilationUnit &info, const InterpreterFrameState &state, int merge_offset, int predecessor_count, BasicBlock *predecessor, const compiler::BytecodeLivenessState *liveness)
void Merge(MaglevGraphBuilder *graph_builder, InterpreterFrameState &unmerged, BasicBlock *predecessor)

References DCHECK_EQ, and v8::internal::anonymous_namespace{intl-objects.cc}::New().

Referenced by EndPrologue().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ MergeIntoInlinedReturnFrameState()

void v8::internal::maglev::MaglevGraphBuilder::MergeIntoInlinedReturnFrameState ( BasicBlock block)
private

Definition at line 14607 of file maglev-graph-builder.cc.

14608  {
14609  int target = inline_exit_offset();
14610  if (merge_states_[target] == nullptr) {
14611  // All returns should have the same liveness, which is that only the
14612  // accumulator is live.
14613  const compiler::BytecodeLivenessState* liveness = GetInLiveness();
14614  DCHECK(liveness->AccumulatorIsLive());
14615  DCHECK_EQ(liveness->live_value_count(), 1);
14616 
14617  // If there's no target frame state, allocate a new one.
14620  predecessor_count(target), predecessor, liveness);
14621  } else {
14622  // Again, all returns should have the same liveness, so double check this.
14624  *merge_states_[target]->frame_state().liveness()));
14625  merge_states_[target]->Merge(this, current_interpreter_frame_, predecessor);
14626  }
14627 }
bool Equals(bool ignore_case, unibrow::Mapping< unibrow::Ecma262Canonicalize > *canonicalize, unibrow::uchar a, unibrow::uchar b)

References v8::internal::compiler::BytecodeLivenessState::AccumulatorIsLive(), v8::internal::DCHECK(), DCHECK_EQ, v8::internal::anonymous_namespace{regexp-compiler-tonode.cc}::Equals(), v8::internal::compiler::BytecodeLivenessState::live_value_count(), and v8::internal::anonymous_namespace{intl-objects.cc}::New().

+ Here is the call graph for this function:

◆ min_inlining_frequency()

float v8::internal::maglev::MaglevGraphBuilder::min_inlining_frequency ( )
inline

Definition at line 442 of file maglev-graph-builder.h.

442  {
443  if (is_turbolev()) {
444  return v8_flags.min_inlining_frequency;
445  } else {
446  return v8_flags.min_maglev_inlining_frequency;
447  }
448  }

References is_turbolev(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ MinimizeContextChainDepth()

void v8::internal::maglev::MaglevGraphBuilder::MinimizeContextChainDepth ( ValueNode **  context,
size_t depth 
)
private

Definition at line 170 of file maglev-graph-builder.cc.

171  {
172  while (*depth > 0) {
173  ValueNode* parent_context = TryGetParentContext(*context);
174  if (parent_context == nullptr) return;
175  *context = parent_context;
176  (*depth)--;
177  }
178 }
ValueNode * TryGetParentContext(ValueNode *node)

References TryGetParentContext().

+ Here is the call graph for this function:

◆ MoveNodeBetweenRegisters()

void v8::internal::maglev::MaglevGraphBuilder::MoveNodeBetweenRegisters ( interpreter::Register  src,
interpreter::Register  dst 
)
inlineprivate

Definition at line 1511 of file maglev-graph-builder.h.

1512  {
1513  // We shouldn't be moving newly created nodes between registers.
1514  DCHECK(!IsNodeCreatedForThisBytecode(current_interpreter_frame_.get(src)));
1516 
1518  }

References DCHECK, and DCHECK_NOT_NULL.

◆ need_checkpointed_loop_entry()

bool v8::internal::maglev::MaglevGraphBuilder::need_checkpointed_loop_entry ( )
inline

Definition at line 390 of file maglev-graph-builder.h.

390  {
391  return v8_flags.maglev_speculative_hoist_phi_untagging ||
392  v8_flags.maglev_licm;
393  }

References v8::internal::v8_flags.

Referenced by VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ NegateBranchType()

static BranchType v8::internal::maglev::MaglevGraphBuilder::NegateBranchType ( BranchType  jump_type)
inlinestaticprivate

Definition at line 2849 of file maglev-graph-builder.h.

◆ NewObjectId()

uint32_t v8::internal::maglev::MaglevGraphBuilder::NewObjectId ( )
inline

Definition at line 415 of file maglev-graph-builder.h.

415 { return graph_->NewObjectId(); }

References graph_, and v8::internal::maglev::Graph::NewObjectId().

Referenced by v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObject().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ next_offset()

int v8::internal::maglev::MaglevGraphBuilder::next_offset ( ) const
inlineprivate

Definition at line 1886 of file maglev-graph-builder.h.

◆ node_buffer()

ZoneVector<Node*>& v8::internal::maglev::MaglevGraphBuilder::node_buffer ( )
inline

Definition at line 413 of file maglev-graph-builder.h.

413 { return graph_->node_buffer(); }
ZoneVector< Node * > & node_buffer()
Definition: maglev-graph.h:148

References graph_, and v8::internal::maglev::Graph::node_buffer().

Referenced by AddInitializedNodeToGraph(), and v8::internal::maglev::MaglevInliner::AddNodeAtBlockEnd().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ OsrAnalyzePrequel()

void v8::internal::maglev::MaglevGraphBuilder::OsrAnalyzePrequel ( )

Definition at line 14409 of file maglev-graph-builder.cc.

14409  {
14412 
14413  // TODO(olivf) We might want to start collecting known_node_aspects_ here.
14415  iterator_.Advance()) {
14416  switch (iterator_.current_bytecode()) {
14417  case interpreter::Bytecode::kPushContext: {
14419  // Nothing left to analyze...
14420  return;
14421  }
14422  default:
14423  continue;
14424  }
14425  }
14426 }

References DCHECK_EQ, and graph().

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ parameter_count()

int v8::internal::maglev::MaglevGraphBuilder::parameter_count ( ) const
inlineprivate

Definition at line 3127 of file maglev-graph-builder.h.

3127 { return compilation_unit_->parameter_count(); }

Referenced by Build(), and GetArgument().

+ Here is the caller graph for this function:

◆ parameter_count_without_receiver()

int v8::internal::maglev::MaglevGraphBuilder::parameter_count_without_receiver ( ) const
inlineprivate

Definition at line 3128 of file maglev-graph-builder.h.

3128 { return parameter_count() - 1; }

◆ PeelLoop()

void v8::internal::maglev::MaglevGraphBuilder::PeelLoop ( )

Definition at line 14258 of file maglev-graph-builder.cc.

14258  {
14259  int loop_header = iterator_.current_offset();
14260  DCHECK(loop_headers_to_peel_.Contains(loop_header));
14262  peeled_iteration_count_ = v8_flags.maglev_optimistic_peeled_loops ? 2 : 1;
14263  any_peeled_loop_ = true;
14264  allow_loop_peeling_ = false;
14265 
14266  if (v8_flags.trace_maglev_graph_building) {
14267  std::cout << " * Begin loop peeling...." << std::endl;
14268  }
14269 
14270  while (in_peeled_iteration()) {
14272  }
14273  // Emit the actual (not peeled) loop if needed.
14274  if (loop_header == iterator_.current_offset()) {
14276  }
14277  allow_loop_peeling_ = true;
14278 }

References v8::internal::DCHECK(), and v8::internal::v8_flags.

Referenced by BuildBody().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ predecessor_count()

uint32_t v8::internal::maglev::MaglevGraphBuilder::predecessor_count ( uint32_t  offset)
inlineprivate

Definition at line 3211 of file maglev-graph-builder.h.

3211  {
3212  DCHECK_LE(offset, bytecode().length());
3215  uint32_t actual = predecessor_count_[offset];
3216  DCHECK_IMPLIES(merge_states_[offset],
3217  merge_states_[offset]->predecessor_count() == actual);
3218  return actual;
3219  }

References DCHECK_IMPLIES, DCHECK_LE, and v8::internal::length.

Referenced by BuildMergeStates(), and VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ Print() [1/3]

void v8::internal::maglev::MaglevGraphBuilder::Print ( const char *  str)
inlineprivate

Definition at line 1429 of file maglev-graph-builder.h.

1429  {
1430  Handle<String> string_handle =
1432  str, AllocationType::kOld);
1433  ValueNode* string_node = graph()->GetConstant(MakeRefAssumeMemoryFence(
1434  broker(), broker()->CanonicalPersistentHandle(string_handle)));
1435  CHECK(BuildCallRuntime(Runtime::kGlobalPrint, {string_node}).IsDone());
1436  }
Handle< String > NewStringFromAsciiChecked(const char *str, AllocationType allocation=AllocationType::kYoung)
Definition: factory-base.h:325
v8::internal::LocalFactory * factory()
Definition: local-isolate.h:98

References broker(), CHECK, graph(), and v8::internal::compiler::MakeRefAssumeMemoryFence().

+ Here is the call graph for this function:

◆ Print() [2/3]

void v8::internal::maglev::MaglevGraphBuilder::Print ( const char *  str,
ValueNode value 
)
inlineprivate

Definition at line 1442 of file maglev-graph-builder.h.

1442  {
1443  Print(str);
1444  Print(value);
1445  }

References v8::internal::Print(), and v8::internal::value.

+ Here is the call graph for this function:

◆ Print() [3/3]

void v8::internal::maglev::MaglevGraphBuilder::Print ( ValueNode value)
inlineprivate

Definition at line 1438 of file maglev-graph-builder.h.

1438  {
1439  CHECK(BuildCallRuntime(Runtime::kDebugPrint, {value}).IsDone());
1440  }

References CHECK, and v8::internal::value.

◆ PrintVirtualObjects()

void v8::internal::maglev::MaglevGraphBuilder::PrintVirtualObjects ( )
inlineprivate

Definition at line 821 of file maglev-graph-builder.h.

821  {
822  if (!v8_flags.trace_maglev_graph_building) return;
824  std::cout, "* VOs (Interpreter Frame State): ", graph_labeller());
825  }
void Print(std::ostream &os, const char *prefix, MaglevGraphLabeller *labeller) const
Definition: maglev-ir.cc:330

References current_interpreter_frame_, graph_labeller(), v8::internal::maglev::VirtualObjectList::Print(), v8::internal::v8_flags, and v8::internal::maglev::InterpreterFrameState::virtual_objects().

Referenced by CreateEdgeSplitBlock(), and VisitSingleBytecode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ProcessMergePoint()

void v8::internal::maglev::MaglevGraphBuilder::ProcessMergePoint ( int  offset,
bool  preserve_known_node_aspects 
)
inlineprivate

Definition at line 640 of file maglev-graph-builder.h.

640  {
641  // First copy the merge state to be the current state.
642  MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
644  preserve_known_node_aspects, zone());
645 
646  ProcessMergePointPredecessors(merge_state, jump_targets_[offset]);
647  }
void CopyFrom(const MaglevCompilationUnit &info, MergePointInterpreterFrameState &state, bool preserve_known_node_aspects, Zone *zone)
void ProcessMergePointPredecessors(MergePointInterpreterFrameState &merge_state, BasicBlockRef &jump_targets)

References compilation_unit_, v8::internal::maglev::InterpreterFrameState::CopyFrom(), current_interpreter_frame_, jump_targets_, merge_states_, ProcessMergePointPredecessors(), and zone().

Referenced by VisitSingleBytecode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ProcessMergePointAtExceptionHandlerStart()

void v8::internal::maglev::MaglevGraphBuilder::ProcessMergePointAtExceptionHandlerStart ( int  offset)
inlineprivate

Definition at line 608 of file maglev-graph-builder.h.

608  {
610 
611  MergePointInterpreterFrameState& merge_state = *merge_states_[offset];
612  DCHECK_EQ(merge_state.predecessor_count(), 0);
613 
614  // Copy state.
616  // Expressions would have to be explicitly preserved across exceptions.
617  // However, at this point we do not know which ones might be used.
620 
621  // Merges aren't simple fallthroughs, so we should reset the checkpoint
622  // validity.
624 
625  // Register exception phis.
626  if (has_graph_labeller()) {
627  for (Phi* phi : *merge_states_[offset]->phis()) {
629  BytecodeOffset(offset),
631  if (v8_flags.trace_maglev_graph_building) {
632  std::cout << " " << phi << " "
633  << PrintNodeLabel(graph_labeller(), phi) << ": "
634  << PrintNode(graph_labeller(), phi) << std::endl;
635  }
636  }
637  }
638  }

References v8::internal::maglev::KnownNodeAspects::ClearAvailableExpressions(), compilation_unit_, v8::internal::maglev::InterpreterFrameState::CopyFrom(), current_allocation_block_, current_interpreter_frame_, current_source_position_, DCHECK_EQ, graph_labeller(), has_graph_labeller(), v8::internal::maglev::InterpreterFrameState::known_node_aspects(), merge_states_, v8::internal::maglev::MergePointInterpreterFrameState::predecessor_count(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::maglev::MaglevGraphLabeller::RegisterNode(), ResetBuilderCachedState(), and v8::internal::v8_flags.

Referenced by VisitSingleBytecode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ProcessMergePointPredecessors()

void v8::internal::maglev::MaglevGraphBuilder::ProcessMergePointPredecessors ( MergePointInterpreterFrameState merge_state,
BasicBlockRef jump_targets 
)
inlineprivate

Definition at line 650 of file maglev-graph-builder.h.

652  {
653  // TODO(olivf): Support allocation folding across control flow.
655 
656  // Merges aren't simple fallthroughs, so we should reset state which is
657  // cached directly on the builder instead of on the merge states.
659 
660  if (merge_state.is_loop()) {
661  DCHECK_EQ(merge_state.predecessors_so_far(),
662  merge_state.predecessor_count() - 1);
663  } else {
664  DCHECK_EQ(merge_state.predecessors_so_far(),
665  merge_state.predecessor_count());
666  }
667 
668  if (merge_state.predecessor_count() == 1) return;
669 
670  // Set up edge-split.
671  int predecessor_index = merge_state.predecessor_count() - 1;
672  if (merge_state.is_loop()) {
 673  // For loops, the JumpLoop block hasn't been generated yet, and so isn't
 674  // in the list of jump targets. It's the last predecessor, so drop the
 675  // index by one.
676  DCHECK(merge_state.is_unmerged_loop());
677  predecessor_index--;
678  }
679  BasicBlockRef* old_jump_targets = jump_targets.Reset();
680  while (old_jump_targets != nullptr) {
681  BasicBlock* predecessor = merge_state.predecessor_at(predecessor_index);
682  CHECK(predecessor);
683  ControlNode* control = predecessor->control_node();
684  if (control->Is<ConditionalControlNode>()) {
685  // CreateEmptyBlock automatically registers itself with the offset.
686  predecessor = CreateEdgeSplitBlock(jump_targets, predecessor);
687  // Set the old predecessor's (the conditional block) reference to
688  // point to the new empty predecessor block.
689  old_jump_targets =
690  old_jump_targets->SetToBlockAndReturnNext(predecessor);
691  merge_state.set_predecessor_at(predecessor_index, predecessor);
692  } else {
693  // Re-register the block in the offset's ref list.
694  old_jump_targets = old_jump_targets->MoveToRefList(&jump_targets);
695  }
696  // We only set the predecessor id after splitting critical edges, to make
697  // sure the edge split blocks pick up the correct predecessor index.
698  predecessor->set_predecessor_id(predecessor_index--);
699  }
700  DCHECK_EQ(predecessor_index, -1);
701  RegisterPhisWithGraphLabeller(merge_state);
702  }
void RegisterPhisWithGraphLabeller(MergePointInterpreterFrameState &merge_state)
BasicBlock * CreateEdgeSplitBlock(BasicBlockRef &jump_targets, BasicBlock *predecessor)

References CHECK, v8::internal::maglev::BasicBlock::control_node(), CreateEdgeSplitBlock(), current_allocation_block_, v8::internal::DCHECK(), DCHECK_EQ, v8::internal::maglev::NodeBase::Is(), v8::internal::maglev::MergePointInterpreterFrameState::is_loop(), v8::internal::maglev::MergePointInterpreterFrameState::is_unmerged_loop(), v8::internal::maglev::BasicBlockRef::MoveToRefList(), v8::internal::maglev::MergePointInterpreterFrameState::predecessor_at(), v8::internal::maglev::MergePointInterpreterFrameState::predecessor_count(), v8::internal::maglev::MergePointInterpreterFrameState::predecessors_so_far(), RegisterPhisWithGraphLabeller(), v8::internal::maglev::BasicBlockRef::Reset(), ResetBuilderCachedState(), v8::internal::maglev::MergePointInterpreterFrameState::set_predecessor_at(), v8::internal::maglev::BasicBlock::set_predecessor_id(), and v8::internal::maglev::BasicBlockRef::SetToBlockAndReturnNext().

Referenced by ProcessMergePoint().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ RecordKnownProperty()

void v8::internal::maglev::MaglevGraphBuilder::RecordKnownProperty ( ValueNode lookup_start_object,
KnownNodeAspects::LoadedPropertyMapKey  key,
ValueNode value,
bool  is_const,
compiler::AccessMode  access_mode 
)
private

Definition at line 7351 of file maglev-graph-builder.cc.

7353  {
7354  DCHECK(!value->properties().is_conversion());
7355  KnownNodeAspects::LoadedPropertyMap& loaded_properties =
7358  // Try to get loaded_properties[key] if it already exists, otherwise
7359  // construct loaded_properties[key] = ZoneMap{zone()}.
7360  auto& props_for_key =
7361  loaded_properties.try_emplace(key, zone()).first->second;
7362 
7363  if (!is_const && IsAnyStore(access_mode)) {
7364  if (is_loop_effect_tracking()) {
7365  loop_effects_->keys_cleared.insert(key);
7366  }
7367  // We don't do any aliasing analysis, so stores clobber all other cached
7368  // loads of a property with that key. We only need to do this for
7369  // non-constant properties, since constant properties are known not to
7370  // change and therefore can't be clobbered.
7371  // TODO(leszeks): Do some light aliasing analysis here, e.g. checking
7372  // whether there's an intersection of known maps.
7373  if (v8_flags.trace_maglev_graph_building) {
7374  std::cout << " * Removing all non-constant cached ";
7375  switch (key.type()) {
7377  std::cout << "properties with name " << *key.name().object();
7378  break;
7380  std::cout << "Elements";
7381  break;
7383  std::cout << "TypedArray length";
7384  break;
7386  std::cout << "String length";
7387  break;
7388  }
7389  std::cout << std::endl;
7390  }
7391  props_for_key.clear();
7392  }
7393 
7394  if (v8_flags.trace_maglev_graph_building) {
7395  std::cout << " * Recording " << (is_const ? "constant" : "non-constant")
7396  << " known property "
7397  << PrintNodeLabel(graph_labeller(), lookup_start_object) << ": "
7398  << PrintNode(graph_labeller(), lookup_start_object) << " [";
7399  switch (key.type()) {
7401  std::cout << *key.name().object();
7402  break;
7404  std::cout << "Elements";
7405  break;
7407  std::cout << "TypedArray length";
7408  break;
7410  std::cout << "String length";
7411  break;
7412  }
7413  std::cout << "] = " << PrintNodeLabel(graph_labeller(), value) << ": "
7414  << PrintNode(graph_labeller(), value) << std::endl;
7415  }
7416 
7417  if (IsAnyStore(access_mode) && !is_const && is_loop_effect_tracking()) {
7418  auto updated = props_for_key.emplace(lookup_start_object, value);
7419  if (updated.second) {
7420  loop_effects_->objects_written.insert(lookup_start_object);
7421  } else if (updated.first->second != value) {
7422  updated.first->second = value;
7423  loop_effects_->objects_written.insert(lookup_start_object);
7424  }
7425  } else {
7426  props_for_key[lookup_start_object] = value;
7427  }
7428 }
bool IsAnyStore(AccessMode mode)
Definition: heap-refs.h:71
ZoneMap< LoadedPropertyMapKey, ZoneMap< ValueNode *, ValueNode * > > LoadedPropertyMap

References v8::internal::DCHECK(), v8::internal::compiler::IsAnyStore(), v8::internal::key, v8::internal::anonymous_namespace{ic.cc}::kName, v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::v8_flags, and v8::internal::value.

Referenced by BuildLoadJSArrayLength(), TryBuildPropertyLoad(), and TryBuildPropertyStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ RecordUseReprHint() [1/2]

void v8::internal::maglev::MaglevGraphBuilder::RecordUseReprHint ( Phi phi,
UseRepresentation  repr 
)
inline

Definition at line 400 of file maglev-graph-builder.h.

400  {
402  }
void RecordUseReprHint(Phi *phi, UseRepresentationSet reprs)
base::EnumSet< UseRepresentation, int8_t > UseRepresentationSet
Definition: maglev-ir.h:9835

References RecordUseReprHint().

+ Here is the call graph for this function:

◆ RecordUseReprHint() [2/2]

void v8::internal::maglev::MaglevGraphBuilder::RecordUseReprHint ( Phi phi,
UseRepresentationSet  reprs 
)
inline

Definition at line 397 of file maglev-graph-builder.h.

397  {
398  phi->RecordUseReprHint(reprs);
399  }

References v8::internal::maglev::Phi::RecordUseReprHint().

Referenced by v8::internal::maglev::MergePointInterpreterFrameState::MergeLoopValue(), RecordUseReprHint(), and RecordUseReprHintIfPhi().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ RecordUseReprHintIfPhi()

void v8::internal::maglev::MaglevGraphBuilder::RecordUseReprHintIfPhi ( ValueNode node,
UseRepresentation  repr 
)
inline

Definition at line 403 of file maglev-graph-builder.h.

403  {
404  if (Phi* phi = node->TryCast<Phi>()) {
405  RecordUseReprHint(phi, repr);
406  }
407  }

References RecordUseReprHint(), and v8::internal::maglev::NodeBase::TryCast().

Referenced by CanElideWriteBarrier(), GetFloat64(), GetHoleyFloat64ForToNumber(), GetInt32(), GetInt32ElementIndex(), GetSmiValue(), GetTaggedValue(), and GetTruncatedInt32ForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ReduceCall()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::ReduceCall ( ValueNode target_node,
CallArguments args,
const compiler::FeedbackSource feedback_source = compiler::FeedbackSource() 
)
private

Definition at line 11718 of file maglev-graph-builder.cc.

11720  {
11721  if (compiler::OptionalHeapObjectRef maybe_constant =
11722  TryGetConstant(target_node)) {
11723  if (maybe_constant->IsJSFunction()) {
11724  MaybeReduceResult result = TryReduceCallForTarget(
11725  target_node, maybe_constant->AsJSFunction(), args, feedback_source);
11727  }
11728  }
11729 
11730  // If the implementation here becomes more complex, we could probably
11731  // deduplicate the code for FastCreateClosure and CreateClosure by using
11732  // templates or giving them a shared base class.
11733  if (FastCreateClosure* fast_create_closure =
11734  target_node->TryCast<FastCreateClosure>()) {
11735  MaybeReduceResult result = TryReduceCallForNewClosure(
11736  fast_create_closure, fast_create_closure->context().node(),
11737 #ifdef V8_ENABLE_LEAPTIERING
11738  fast_create_closure->feedback_cell().dispatch_handle(),
11739 #endif
11740  fast_create_closure->shared_function_info(),
11741  fast_create_closure->feedback_cell(), args, feedback_source);
11743  } else if (CreateClosure* create_closure =
11744  target_node->TryCast<CreateClosure>()) {
11745  MaybeReduceResult result = TryReduceCallForNewClosure(
11746  create_closure, create_closure->context().node(),
11747 #ifdef V8_ENABLE_LEAPTIERING
11748  create_closure->feedback_cell().dispatch_handle(),
11749 #endif
11750  create_closure->shared_function_info(), create_closure->feedback_cell(),
11751  args, feedback_source);
11753  }
11754 
11755  // On fallthrough, create a generic call.
11756  return BuildGenericCall(target_node, Call::TargetType::kAny, args);
11757 }
MaybeReduceResult TryReduceCallForTarget(ValueNode *target_node, compiler::JSFunctionRef target, CallArguments &args, const compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryReduceCallForNewClosure(ValueNode *target_node, ValueNode *target_context, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
ValueNode * BuildGenericCall(ValueNode *target, Call::TargetType target_type, const CallArguments &args)

References v8::base::args, v8::base::internal::result, RETURN_IF_DONE, v8::internal::maglev::NodeBase::TryCast(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ ReduceCallWithArrayLike()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::ReduceCallWithArrayLike ( ValueNode target_node,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11702 of file maglev-graph-builder.cc.

11704  {
11706 
11707  // TODO(victorgomes): Add the case for JSArrays and Rest parameter.
11708  if (std::optional<VirtualObject*> arguments_object =
11709  TryGetNonEscapingArgumentsObject(args.array_like_argument())) {
11711  target_node, args, *arguments_object, feedback_source));
11712  }
11713 
11714  // On fallthrough, create a generic call.
11715  return BuildGenericCall(target_node, Call::TargetType::kAny, args);
11716 }
std::optional< VirtualObject * > TryGetNonEscapingArgumentsObject(ValueNode *value)
ReduceResult ReduceCallWithArrayLikeForArgumentsObject(ValueNode *target_node, CallArguments &args, VirtualObject *arguments_object, const compiler::FeedbackSource &feedback_source)

References v8::base::args, DCHECK_EQ, and RETURN_IF_DONE.

◆ ReduceCallWithArrayLikeForArgumentsObject()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::ReduceCallWithArrayLikeForArgumentsObject ( ValueNode target_node,
CallArguments args,
VirtualObject arguments_object,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11580 of file maglev-graph-builder.cc.

11583  {
11585  DCHECK(arguments_object->map().IsJSArgumentsObjectMap() ||
11586  arguments_object->map().IsJSArrayMap());
11587  args.PopArrayLikeArgument();
11588  ValueNode* elements_value =
11589  arguments_object->get(JSArgumentsObject::kElementsOffset);
11590  if (elements_value->Is<ArgumentsElements>()) {
11592  // TODO(victorgomes): Add JSFunction node type in KNA and use the info here.
11593  if (compiler::OptionalHeapObjectRef maybe_constant =
11594  TryGetConstant(target_node)) {
11595  if (maybe_constant->IsJSFunction()) {
11596  compiler::SharedFunctionInfoRef shared =
11597  maybe_constant->AsJSFunction().shared(broker());
11598  if (!IsClassConstructor(shared.kind())) {
11599  target_type = Call::TargetType::kJSFunction;
11600  }
11601  }
11602  }
11603  int start_index = 0;
11604  if (elements_value->Cast<ArgumentsElements>()->type() ==
11606  start_index =
11607  elements_value->Cast<ArgumentsElements>()->formal_parameter_count();
11608  }
11609  return AddNewCallNode<CallForwardVarargs>(args, GetTaggedValue(target_node),
11611  start_index, target_type);
11612  }
11613 
11614  if (elements_value->Is<RootConstant>()) {
11615  // It is a RootConstant, Elements can only be the empty fixed array.
11616  DCHECK_EQ(elements_value->Cast<RootConstant>()->index(),
11617  RootIndex::kEmptyFixedArray);
11618  CallArguments new_args(ConvertReceiverMode::kAny, {args.receiver()});
11619  return ReduceCall(target_node, new_args, feedback_source);
11620  }
11621 
11622  if (Constant* constant_value = elements_value->TryCast<Constant>()) {
11623  DCHECK(constant_value->object().IsFixedArray());
11624  compiler::FixedArrayRef elements = constant_value->object().AsFixedArray();
11625  base::SmallVector<ValueNode*, 8> arg_list;
11626  DCHECK_NOT_NULL(args.receiver());
11627  arg_list.push_back(args.receiver());
11628  for (int i = 0; i < static_cast<int>(args.count()); i++) {
11629  arg_list.push_back(args[i]);
11630  }
11631  for (uint32_t i = 0; i < elements.length(); i++) {
11632  arg_list.push_back(GetConstant(*elements.TryGet(broker(), i)));
11633  }
11634  CallArguments new_args(ConvertReceiverMode::kAny, std::move(arg_list));
11635  return ReduceCall(target_node, new_args, feedback_source);
11636  }
11637 
11638  DCHECK(elements_value->Is<InlinedAllocation>());
11639  InlinedAllocation* allocation = elements_value->Cast<InlinedAllocation>();
11640  VirtualObject* elements = allocation->object();
11641 
11642  base::SmallVector<ValueNode*, 8> arg_list;
11643  DCHECK_NOT_NULL(args.receiver());
11644  arg_list.push_back(args.receiver());
11645  for (int i = 0; i < static_cast<int>(args.count()); i++) {
11646  arg_list.push_back(args[i]);
11647  }
11648  DCHECK(elements->get(offsetof(FixedArray, length_))->Is<Int32Constant>());
11649  int length = elements->get(offsetof(FixedArray, length_))
11650  ->Cast<Int32Constant>()
11651  ->value();
11652  for (int i = 0; i < length; i++) {
11653  arg_list.push_back(elements->get(FixedArray::OffsetOfElementAt(i)));
11654  }
11655  CallArguments new_args(ConvertReceiverMode::kAny, std::move(arg_list));
11656  return ReduceCall(target_node, new_args, feedback_source);
11657 }

References v8::base::args, broker(), v8::internal::maglev::NodeBase::Cast(), v8::internal::DCHECK(), DCHECK_EQ, DCHECK_NOT_NULL, v8::internal::maglev::VirtualObject::get(), v8::internal::maglev::RootConstant::index(), v8::internal::maglev::NodeBase::Is(), v8::internal::IsClassConstructor(), v8::internal::kAny, v8::internal::kRestParameter, v8::internal::length, v8::internal::compiler::FixedArrayBaseRef::length(), v8::internal::maglev::VirtualObject::map(), v8::internal::compiler::FixedArrayRef::object(), v8::internal::maglev::InlinedAllocation::object(), v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), v8::base::SmallVector< T, kSize, Allocator >::push_back(), v8::internal::maglev::NodeBase::TryCast(), v8::internal::compiler::FixedArrayRef::TryGet(), v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), v8::internal::maglev::ArgumentsElements::type(), and v8::internal::value.

+ Here is the call graph for this function:

◆ register_count()

int v8::internal::maglev::MaglevGraphBuilder::register_count ( ) const
inlineprivate

Definition at line 3129 of file maglev-graph-builder.h.

Referenced by BuildRegisterFrameInitialization().

+ Here is the caller graph for this function:

◆ RegisterPhisWithGraphLabeller()

void v8::internal::maglev::MaglevGraphBuilder::RegisterPhisWithGraphLabeller ( MergePointInterpreterFrameState merge_state)
inlineprivate

Definition at line 704 of file maglev-graph-builder.h.

705  {
706  if (!has_graph_labeller()) return;
707 
708  for (Phi* phi : *merge_state.phis()) {
710  if (v8_flags.trace_maglev_graph_building) {
711  std::cout << " " << phi << " "
712  << PrintNodeLabel(graph_labeller(), phi) << ": "
713  << PrintNode(graph_labeller(), phi) << std::endl;
714  }
715  }
716  }

References graph_labeller(), has_graph_labeller(), v8::internal::maglev::MergePointInterpreterFrameState::phis(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::maglev::MaglevGraphLabeller::RegisterNode(), and v8::internal::v8_flags.

Referenced by ProcessMergePointPredecessors().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ ResetBuilderCachedState()

template<bool is_possible_map_change = true>
void v8::internal::maglev::MaglevGraphBuilder::ResetBuilderCachedState ( )
inlineprivate

Definition at line 1875 of file maglev-graph-builder.h.

1875  {
1877 
1878  // If a map might have changed, then we need to re-check it for for-in.
1879  // TODO(leszeks): Track this on merge states / known node aspects, rather
1880  // than on the graph, so that it can survive control flow.
1881  if constexpr (is_possible_map_change) {
1883  }
1884  }

Referenced by ProcessMergePointAtExceptionHandlerStart(), and ProcessMergePointPredecessors().

+ Here is the caller graph for this function:

◆ RuntimeFunctionCanThrow()

constexpr bool v8::internal::maglev::MaglevGraphBuilder::RuntimeFunctionCanThrow ( Runtime::FunctionId  function_id)
inlineconstexprprivate

Definition at line 1394 of file maglev-graph-builder.h.

1394  {
1395 #define BAILOUT(name, ...) \
1396  if (function_id == Runtime::k##name) { \
1397  return true; \
1398  }
1400 #undef BAILOUT
1401  return false;
1402  }
#define BAILOUT(name,...)
#define FOR_EACH_THROWING_INTRINSIC(F)
Definition: runtime.h:880

References BAILOUT, and FOR_EACH_THROWING_INTRINSIC.

◆ Select()

template<typename FCond , typename FTrue , typename FFalse >
ValueNode * v8::internal::maglev::MaglevGraphBuilder::Select ( FCond  cond,
FTrue  if_true,
FFalse  if_false 
)
private

Definition at line 847 of file maglev-graph-builder.cc.

848  {
849  MaglevSubGraphBuilder subgraph(this, 1);
850  MaglevSubGraphBuilder::Label else_branch(&subgraph, 1);
851  BranchBuilder builder(this, &subgraph, BranchType::kBranchIfFalse,
852  &else_branch);
853  BranchResult branch_result = cond(builder);
854  if (branch_result == BranchResult::kAlwaysTrue) {
855  return if_true();
856  }
857  if (branch_result == BranchResult::kAlwaysFalse) {
858  return if_false();
859  }
860  DCHECK(branch_result == BranchResult::kDefault);
862  MaglevSubGraphBuilder::Label done(&subgraph, 2, {&ret_val});
863  subgraph.set(ret_val, if_true());
864  subgraph.Goto(&done);
865  subgraph.Bind(&else_branch);
866  subgraph.set(ret_val, if_false());
867  subgraph.Goto(&done);
868  subgraph.Bind(&done);
869  return subgraph.get(ret_val);
870 }
SnapshotTable< OpIndex, VariableData >::Key Variable
Definition: operations.h:81

References v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), v8::internal::DCHECK(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::get(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(), kAlwaysFalse, kAlwaysTrue, kBranchIfFalse, kDefault, and v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::set().

Referenced by TryBuildNewConsString(), and TryReduceTypeOf().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ SelectReduction()

template<typename FCond , typename FTrue , typename FFalse >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::SelectReduction ( FCond  cond,
FTrue  if_true,
FFalse  if_false 
)
private

Definition at line 873 of file maglev-graph-builder.cc.

874  {
875  MaglevSubGraphBuilder subgraph(this, 1);
876  MaglevSubGraphBuilder::Label else_branch(&subgraph, 1);
877  BranchBuilder builder(this, &subgraph, BranchType::kBranchIfFalse,
878  &else_branch);
879  BranchResult branch_result = cond(builder);
880  if (branch_result == BranchResult::kAlwaysTrue) {
881  return if_true();
882  }
883  if (branch_result == BranchResult::kAlwaysFalse) {
884  return if_false();
885  }
886  DCHECK(branch_result == BranchResult::kDefault);
888  MaglevSubGraphBuilder::Label done(&subgraph, 2, {&ret_val});
889  MaybeReduceResult result_if_true = if_true();
890  CHECK(result_if_true.IsDone());
891  if (result_if_true.IsDoneWithValue()) {
892  subgraph.set(ret_val, result_if_true.value());
893  }
894  subgraph.GotoOrTrim(&done);
895  subgraph.Bind(&else_branch);
896  MaybeReduceResult result_if_false = if_false();
897  CHECK(result_if_false.IsDone());
898  if (result_if_true.IsDoneWithAbort() && result_if_false.IsDoneWithAbort()) {
900  }
901  if (result_if_false.IsDoneWithValue()) {
902  subgraph.set(ret_val, result_if_false.value());
903  }
904  subgraph.GotoOrTrim(&done);
905  subgraph.Bind(&done);
906  return subgraph.get(ret_val);
907 }

References v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), CHECK, v8::internal::DCHECK(), v8::internal::maglev::ReduceResult::DoneWithAbort(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::get(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoOrTrim(), v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::maglev::MaybeReduceResult::IsDoneWithAbort(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), kAlwaysFalse, kAlwaysTrue, kBranchIfFalse, kDefault, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::set(), and v8::internal::maglev::MaybeReduceResult::value().

+ Here is the call graph for this function:

◆ set_current_block()

void v8::internal::maglev::MaglevGraphBuilder::set_current_block ( BasicBlock block)
inline

Definition at line 409 of file maglev-graph-builder.h.

409 { current_block_ = block; }

References current_block_.

Referenced by v8::internal::maglev::MaglevInliner::BuildInlineFunction().

+ Here is the caller graph for this function:

◆ SetAccumulator()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::SetAccumulator ( NodeT node)
inlineprivate

◆ SetAccumulatorInBranch()

void v8::internal::maglev::MaglevGraphBuilder::SetAccumulatorInBranch ( ValueNode value)
inlineprivate

Definition at line 1763 of file maglev-graph-builder.h.

1763  {
1764  DCHECK_IMPLIES(value->properties().can_lazy_deopt(),
1765  !IsNodeCreatedForThisBytecode(value));
1767  value);
1768  }

References DCHECK_IMPLIES, and v8::internal::value.

Referenced by v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::StartFallthroughBlock().

+ Here is the caller graph for this function:

◆ SetArgument()

void v8::internal::maglev::MaglevGraphBuilder::SetArgument ( int  i,
ValueNode value 
)

Definition at line 1064 of file maglev-graph-builder.cc.

References current_interpreter_frame_, v8::internal::interpreter::Register::FromParameterIndex(), v8::internal::maglev::InterpreterFrameState::set(), and v8::internal::value.

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ SetContext()

void v8::internal::maglev::MaglevGraphBuilder::SetContext ( ValueNode context)
inlineprivate

Definition at line 1462 of file maglev-graph-builder.h.

1462  {
1464  context);
1465  }

◆ SetKnownValue()

void v8::internal::maglev::MaglevGraphBuilder::SetKnownValue ( ValueNode node,
compiler::ObjectRef  constant,
NodeType  new_node_type 
)
private

Definition at line 4508 of file maglev-graph-builder.cc.

4509  {
4510  DCHECK(!node->Is<Constant>());
4511  DCHECK(!node->Is<RootConstant>());
4512  NodeInfo* known_info = GetOrCreateInfoFor(node);
4513  // ref type should be compatible with type.
4514  DCHECK(NodeTypeIs(StaticTypeForConstant(broker(), ref), new_node_type));
4515  if (ref.IsHeapObject()) {
4516  DCHECK(IsInstanceOfNodeType(ref.AsHeapObject().map(broker()),
4517  known_info->type(), broker()));
4518  } else {
4519  DCHECK(!NodeTypeIs(known_info->type(), NodeType::kAnyHeapObject));
4520  }
4521  known_info->IntersectType(new_node_type);
4522  known_info->alternative().set_checked_value(GetConstant(ref));
4523 }

References v8::internal::maglev::NodeInfo::alternative(), broker(), v8::internal::DCHECK(), GetConstant(), GetOrCreateInfoFor(), v8::internal::maglev::NodeInfo::IntersectType(), v8::internal::maglev::NodeBase::Is(), v8::internal::maglev::IsInstanceOfNodeType(), v8::internal::maglev::NodeTypeIs(), v8::internal::maglev::StaticTypeForConstant(), and v8::internal::maglev::NodeInfo::type().

+ Here is the call graph for this function:

◆ SetNodeInputs()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::SetNodeInputs ( NodeT node,
std::initializer_list< ValueNode * >  inputs 
)
inlineprivate

Definition at line 1953 of file maglev-graph-builder.h.

1953  {
1954  // Nodes with zero input count don't have kInputTypes defined.
1955  if constexpr (NodeT::kInputCount > 0) {
1956  constexpr UseReprHintRecording hint = ShouldRecordUseReprHint<NodeT>();
1957  int i = 0;
1958  for (ValueNode* input : inputs) {
1959  DCHECK_NOT_NULL(input);
1960  node->set_input(i, ConvertInputTo<hint>(input, NodeT::kInputTypes[i]));
1961  i++;
1962  }
1963  }
1964  }

References DCHECK_NOT_NULL.

Referenced by AddNewNode().

+ Here is the caller graph for this function:

◆ ShouldEagerInlineCall()

bool v8::internal::maglev::MaglevGraphBuilder::ShouldEagerInlineCall ( compiler::SharedFunctionInfoRef  shared)
private

Definition at line 8494 of file maglev-graph-builder.cc.

8495  {
8496  compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8498  TRACE_INLINING(" greedy inlining "
8499  << shared << ": small function, skipping max-depth");
8500  return true;
8501  }
8502  return false;
8503 }
#define TRACE_INLINING(...)

References broker(), v8::internal::compiler::SharedFunctionInfoRef::GetBytecodeArray(), v8::internal::compiler::BytecodeArrayRef::length(), and TRACE_INLINING.

+ Here is the call graph for this function:

◆ ShouldEmitInterruptBudgetChecks()

bool v8::internal::maglev::MaglevGraphBuilder::ShouldEmitInterruptBudgetChecks ( )
inlineprivate

Definition at line 559 of file maglev-graph-builder.h.

559  {
560  if (is_inline()) {
561  return false;
562  }
563  if (is_turbolev()) {
564  // As the top-tier compiler, Turboshaft doesn't need interrupt budget
565  // checks.
566  return false;
567  }
568  return v8_flags.force_emit_interrupt_budget_checks || v8_flags.turbofan;
569  }

References is_inline(), is_turbolev(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ ShouldEmitOsrInterruptBudgetChecks()

bool v8::internal::maglev::MaglevGraphBuilder::ShouldEmitOsrInterruptBudgetChecks ( )
inlineprivate

Definition at line 570 of file maglev-graph-builder.h.

570  {
571  if (!v8_flags.turbofan || !v8_flags.use_osr || !v8_flags.osr_from_maglev)
572  return false;
573  if (!graph_->is_osr() && !v8_flags.always_osr_from_maglev) {
574  return false;
575  }
576  // TODO(olivf) OSR from maglev requires lazy recompilation (see
577  // CompileOptimizedOSRFromMaglev for details). Without this we end up in
578  // deopt loops, e.g., in chromium content_unittests.
580  return false;
581  }
582  // TODO(olivf) OSR'ing from inlined loops is something we might want, but
583  // can't with our current osr-from-maglev implementation. The reason is that
584  // we OSR up by first going down to the interpreter. For inlined loops this
585  // means we would deoptimize to the caller and then probably end up in the
586  // same maglev osr code again, before reaching the turbofan OSR code in the
587  // callee. The solution is to support osr from maglev without
588  // deoptimization.
589  return !is_inline();
590  }

References v8::internal::OptimizingCompileDispatcher::Enabled(), graph_, is_inline(), v8::internal::maglev::Graph::is_osr(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ ShouldRecordUseReprHint()

template<typename NodeT >
static constexpr UseReprHintRecording v8::internal::maglev::MaglevGraphBuilder::ShouldRecordUseReprHint ( )
inlinestaticconstexprprivate

Definition at line 1940 of file maglev-graph-builder.h.

1940  {
1941  // We do not record a Tagged use on Return, since they are never on the hot
1942  // path, and will lead to a maximum of one additional Tagging operation in
 1943  the worst case. This allows loop accumulators to be untagged even if they
1944  // are later returned.
1945  if constexpr (std::is_same_v<NodeT, Return>) {
1947  } else {
1949  }
1950  }

◆ StartFallthroughBlock()

void v8::internal::maglev::MaglevGraphBuilder::StartFallthroughBlock ( int  next_block_offset,
BasicBlock predecessor 
)
inlineprivate

Definition at line 2017 of file maglev-graph-builder.h.

2017  {
2018  // Start a new block for the fallthrough path, unless it's a merge point, in
2019  // which case we merge our state into it. That merge-point could also be a
2020  // loop header, in which case the merge state might not exist yet (if the
2021  // only predecessors are this path and the JumpLoop).
2023 
2024  if (predecessor_count(next_block_offset) == 1) {
2025  if (v8_flags.trace_maglev_graph_building) {
2026  std::cout << "== New block (single fallthrough) at "
2028  << "==" << std::endl;
2030  }
2031  StartNewBlock(next_block_offset, predecessor);
2032  } else {
2033  MergeIntoFrameState(predecessor, next_block_offset);
2034  }
2035  }
IndirectHandle< SharedFunctionInfo > object() const

References DCHECK_NULL, and v8::internal::v8_flags.

Referenced by v8::internal::maglev::MaglevGraphBuilder::BranchBuilder::Build().

+ Here is the caller graph for this function:

◆ StartNewBlock() [1/2]

void v8::internal::maglev::MaglevGraphBuilder::StartNewBlock ( BasicBlock predecessor,
MergePointInterpreterFrameState merge_state,
BasicBlockRef refs_to_block 
)
inlineprivate

Definition at line 1906 of file maglev-graph-builder.h.

1908  {
1910  current_block_ = zone()->New<BasicBlock>(merge_state, zone());
1911  if (merge_state == nullptr) {
1912  DCHECK_NOT_NULL(predecessor);
1913  current_block_->set_predecessor(predecessor);
1914  } else {
1915  merge_state->InitializeWithBasicBlock(current_block_);
1916  }
1917  refs_to_block.Bind(current_block_);
1918  }
void set_predecessor(BasicBlock *predecessor)

References v8::internal::maglev::BasicBlockRef::Bind(), DCHECK_NOT_NULL, DCHECK_NULL, and v8::internal::maglev::MergePointInterpreterFrameState::InitializeWithBasicBlock().

+ Here is the call graph for this function:

◆ StartNewBlock() [2/2]

void v8::internal::maglev::MaglevGraphBuilder::StartNewBlock ( int  offset,
BasicBlock predecessor 
)
inlineprivate

Definition at line 1902 of file maglev-graph-builder.h.

1902  {
1903  StartNewBlock(predecessor, merge_states_[offset], jump_targets_[offset]);
1904  }

Referenced by VisitSingleBytecode().

+ Here is the caller graph for this function:

◆ StartPrologue()

void v8::internal::maglev::MaglevGraphBuilder::StartPrologue ( )

Definition at line 1047 of file maglev-graph-builder.cc.

1047  {
1048  current_block_ = zone()->New<BasicBlock>(nullptr, zone());
1049 }

References current_block_, v8::internal::Zone::New(), and zone().

Referenced by Build().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ StoreAndCacheContextSlot()

ReduceResult v8::internal::maglev::MaglevGraphBuilder::StoreAndCacheContextSlot ( ValueNode context,
int  index,
ValueNode value,
ContextMode  context_mode 
)
private

Definition at line 3692 of file maglev-graph-builder.cc.

3693  {
3694  int offset = Context::OffsetOfElementAt(index);
3695  DCHECK_EQ(
3696  known_node_aspects().loaded_context_constants.count({context, offset}),
3697  0);
3698 
3699  Node* store = nullptr;
3700  if ((v8_flags.script_context_cells || v8_flags.function_context_cells) &&
3701  context_mode == ContextMode::kHasContextCells) {
3702  MaybeReduceResult result =
3705  if (!store && result.IsDone()) {
3706  // If we didn't need to emit any store, there is nothing to cache.
3707  return result.Checked();
3708  }
3709  }
3710 
3711  if (!store) {
3712  store = BuildStoreTaggedField(context, value, offset,
3714  }
3715 
3716  if (v8_flags.trace_maglev_graph_building) {
3717  std::cout << " * Recording context slot store "
3718  << PrintNodeLabel(graph_labeller(), context) << "[" << offset
3719  << "]: " << PrintNode(graph_labeller(), value) << std::endl;
3720  }
3721  KnownNodeAspects::LoadedContextSlots& loaded_context_slots =
3723  if (!loaded_context_slots.empty()) {
3725  broker(), local_isolate(), context);
3726  }
3727  if (known_node_aspects().may_have_aliasing_contexts() ==
3729  compiler::OptionalScopeInfoRef scope_info =
3730  graph()->TryGetScopeInfo(context);
3731  for (auto& cache : loaded_context_slots) {
3732  if (std::get<int>(cache.first) == offset &&
3733  std::get<ValueNode*>(cache.first) != context) {
3734  if (ContextMayAlias(std::get<ValueNode*>(cache.first), scope_info) &&
3735  cache.second != value) {
3736  if (v8_flags.trace_maglev_graph_building) {
3737  std::cout << " * Clearing probably aliasing value "
3738  << PrintNodeLabel(graph_labeller(),
3739  std::get<ValueNode*>(cache.first))
3740  << "[" << offset
3741  << "]: " << PrintNode(graph_labeller(), value)
3742  << std::endl;
3743  }
3744  cache.second = nullptr;
3745  if (is_loop_effect_tracking()) {
3746  loop_effects_->context_slot_written.insert(cache.first);
3748  }
3749  }
3750  }
3751  }
3752  }
3754  auto updated = loaded_context_slots.emplace(key, value);
3755  if (updated.second) {
3756  if (is_loop_effect_tracking()) {
3758  }
3760  } else {
3761  if (updated.first->second != value) {
3762  updated.first->second = value;
3763  if (is_loop_effect_tracking()) {
3765  }
3766  }
3767  if (known_node_aspects().may_have_aliasing_contexts() !=
3769  auto last_store = unobserved_context_slot_stores_.find(key);
3770  if (last_store != unobserved_context_slot_stores_.end()) {
3771  MarkNodeDead(last_store->second);
3772  last_store->second = store;
3773  } else {
3775  }
3776  }
3777  }
3778  return ReduceResult::Done();
3779 }
MaybeReduceResult TrySpecializeStoreContextSlot(ValueNode *context, int index, ValueNode *value, Node **store)
bool ContextMayAlias(ValueNode *context, compiler::OptionalScopeInfoRef scope_info)
too high values may cause the compiler to set high thresholds for inlining to as much as possible avoid inlined allocation of objects that cannot escape trace load stores from virtual maglev objects use TurboFan fast string builder analyze liveness of environment slots and zap dead values trace TurboFan load elimination emit data about basic block usage in builtins to this enable builtin reordering when run mksnapshot flag for emit warnings when applying builtin profile data verify register allocation in TurboFan randomly schedule instructions to stress dependency tracking enable store store elimination in TurboFan rewrite far to near simulate GC compiler thread race related to allow float parameters to be passed in simulator mode JS Wasm Run additional turbo_optimize_inlined_js_wasm_wrappers enables Turboshaft s StaticAssert and CheckTurboshaftTypeOf operations Wasm code into JS functions via the JS to Wasm wrappers are still inlined in TurboFan For controlling whether to at see turbo inline js wasm calls enable Turboshaft s loop unrolling enable an additional Turboshaft phase that performs optimizations based on type information enable Turbolev features that we want to ship in the not too far future trace individual Turboshaft reduction steps trace intermediate Turboshaft reduction steps trace Turboshaft s if else to switch reducer invocation count threshold for early optimization Enables optimizations which favor memory size over execution speed Enables sampling allocation profiler with X as a sample interval min size of a semi the new space consists of two semi spaces max size of the preconfigured old space Collect garbage after Collect garbage after keeps maps alive for< n > old space garbage collections print one detailed trace line in allocation gc speed threshold for starting incremental marking via a task in percent of available threshold for starting incremental marking immediately in percent of available Use a single schedule for determining a marking 
schedule between JS and C objects schedules the minor GC task with kUserVisible priority max worker number of concurrent for NumberOfWorkerThreads start background threads that allocate memory concurrent_array_buffer_sweeping use parallel threads to clear weak refs in the atomic pause trace progress of the incremental marking trace object counts and memory usage report a tick only when allocated zone memory changes by this amount TracingFlags::gc_stats store(v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) DEFINE_GENERIC_IMPLICATION(trace_gc_object_stats
ZoneMap< LoadedContextSlotsKey, ValueNode * > LoadedContextSlots
ZoneSet< KnownNodeAspects::LoadedContextSlotsKey > context_slot_written

References broker(), BuildStoreTaggedField(), v8::internal::maglev::LoopEffects::context_slot_written, ContextMayAlias(), DCHECK_EQ, v8::internal::maglev::ReduceResult::Done(), graph(), graph_labeller(), v8::internal::index, is_loop_effect_tracking(), v8::internal::maglev::kDefault, v8::internal::key, v8::internal::kHasContextCells, known_node_aspects(), v8::internal::maglev::KnownNodeAspects::kYes, v8::internal::maglev::KnownNodeAspects::loaded_context_slots, local_isolate(), loop_effects_, MarkNodeDead(), v8::internal::maglev::LoopEffects::may_have_aliasing_contexts, v8::internal::Context::OffsetOfElementAt(), v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::base::internal::result, RETURN_IF_ABORT, store(), v8::internal::maglev::Graph::TryGetScopeInfo(), TrySpecializeStoreContextSlot(), unobserved_context_slot_stores_, v8::internal::maglev::KnownNodeAspects::UpdateMayHaveAliasingContexts(), v8::internal::v8_flags, and v8::internal::value.

Referenced by BuildStoreContextSlot(), and TryBuildScriptContextStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ StoreRegister()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::StoreRegister ( interpreter::Register  target,
NodeT value 
)
inlineprivate

Definition at line 1751 of file maglev-graph-builder.h.

1751  {
1752  static_assert(std::is_base_of_v<ValueNode, NodeT>);
1753  DCHECK(HasOutputRegister(target));
1755 
1756  // Make sure the lazy deopt info of this value, if any, is registered as
1757  // mutating this register.
1758  DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1759  IsNodeCreatedForThisBytecode(value),
1760  value->lazy_deopt_info()->IsResultRegister(target));
1761  }

References DCHECK, DCHECK_IMPLIES, and v8::internal::value.

◆ StoreRegisterPair()

template<typename NodeT >
void v8::internal::maglev::MaglevGraphBuilder::StoreRegisterPair ( std::pair< interpreter::Register, interpreter::Register target,
NodeT value 
)
inlineprivate

Definition at line 1771 of file maglev-graph-builder.h.

1773  {
1774  const interpreter::Register target0 = target.first;
1775  const interpreter::Register target1 = target.second;
1776 
1777  DCHECK_EQ(interpreter::Register(target0.index() + 1), target1);
1778  DCHECK_EQ(value->ReturnCount(), 2);
1779 
1780  if (!v8_flags.maglev_cse) {
1781  // TODO(olivf): CSE might deduplicate this value and the one below.
1782  DCHECK_NE(0, new_nodes_.count(value));
1783  }
1784  DCHECK(HasOutputRegister(target0));
1786 
1787  ValueNode* second_value = GetSecondValue(value);
1788  if (!v8_flags.maglev_cse) {
1789  DCHECK_NE(0, new_nodes_.count(second_value));
1790  }
1791  DCHECK(HasOutputRegister(target1));
1792  current_interpreter_frame_.set(target1, second_value);
1793 
1794  // Make sure the lazy deopt info of this value, if any, is registered as
1795  // mutating these registers.
1796  DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1797  IsNodeCreatedForThisBytecode(value),
1798  value->lazy_deopt_info()->IsResultRegister(target0));
1799  DCHECK_IMPLIES(value->properties().can_lazy_deopt() &&
1800  IsNodeCreatedForThisBytecode(value),
1801  value->lazy_deopt_info()->IsResultRegister(target1));
1802  }
ValueNode * GetSecondValue(ValueNode *result)

References DCHECK, DCHECK_EQ, DCHECK_IMPLIES, DCHECK_NE, v8::internal::interpreter::Register::index(), v8::internal::v8_flags, and v8::internal::value.

+ Here is the call graph for this function:

◆ StringLengthStaticLowerBound()

size_t v8::internal::maglev::MaglevGraphBuilder::StringLengthStaticLowerBound ( ValueNode string,
int  max_depth = 2 
)
private

Definition at line 2782 of file maglev-graph-builder.cc.

2783  {
2784  if (auto maybe_constant = string->TryGetConstant(broker())) {
2785  if (maybe_constant->IsString()) {
2786  return maybe_constant->AsString().length();
2787  }
2788  }
2789  switch (string->opcode()) {
2790  case Opcode::kNumberToString:
2791  return 1;
2792  case Opcode::kInlinedAllocation:
2793  // TODO(olivf): Add a NodeType::kConsString instead of this check.
2794  if (string->Cast<InlinedAllocation>()->object()->type() ==
2796  return ConsString::kMinLength;
2797  }
2798  break;
2799  case Opcode::kStringConcat:
2800  if (max_depth == 0) return 0;
2801  return StringLengthStaticLowerBound(string->input(0).node(),
2802  max_depth - 1) +
2803  StringLengthStaticLowerBound(string->input(1).node(),
2804  max_depth - 1);
2805  case Opcode::kPhi: {
2806  // For the builder pattern where the inputs are cons strings, we will see
2807  // a phi from the Select that compares against the empty string. We
2808  // can refine the min_length by checking the phi strings. This might
2809  // help us elide the Select.
2810  if (max_depth == 0) return 0;
2811  auto phi = string->Cast<Phi>();
2812  if (phi->input_count() == 0 ||
2813  (phi->is_loop_phi() && phi->is_unmerged_loop_phi())) {
2814  return 0;
2815  }
2816  size_t overall_min_length =
2817  StringLengthStaticLowerBound(phi->input(0).node(), max_depth - 1);
2818  for (int i = 1; i < phi->input_count(); ++i) {
2819  size_t min =
2820  StringLengthStaticLowerBound(phi->input(i).node(), max_depth - 1);
2821  if (min < overall_min_length) {
2822  overall_min_length = min;
2823  }
2824  }
2825  return overall_min_length;
2826  }
2827  default:
2828  break;
2829  }
2830  return 0;
2831 }
static const uint32_t kMinLength
Definition: string.h:1043
size_t StringLengthStaticLowerBound(ValueNode *string, int max_depth=2)

References broker(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::VirtualObject::kConsString, v8::internal::ConsString::kMinLength, v8::internal::maglev::InlinedAllocation::object(), v8::internal::string, and v8::internal::maglev::VirtualObject::type().

Referenced by TryBuildNewConsString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TargetIsCurrentCompilingUnit()

bool v8::internal::maglev::MaglevGraphBuilder::TargetIsCurrentCompilingUnit ( compiler::JSFunctionRef  target)
private

Definition at line 10971 of file maglev-graph-builder.cc.

10972  {
10974  return target.object().equals(
10976  }
10977  return target.object()->shared() ==
10978  compilation_unit_->info()->toplevel_function()->shared();
10979 }

References v8::internal::compiler::JSFunctionRef::object().

+ Here is the call graph for this function:

◆ TopLevelFunctionPassMaglevPrintFilter()

bool v8::internal::maglev::MaglevGraphBuilder::TopLevelFunctionPassMaglevPrintFilter ( )

Definition at line 8487 of file maglev-graph-builder.cc.

8487  {
8490  .object()
8491  ->PassesFilter(v8_flags.maglev_print_filter);
8492 }

References v8::internal::v8_flags.

◆ TryBuildAndAllocateJSGeneratorObject()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildAndAllocateJSGeneratorObject ( ValueNode closure,
ValueNode receiver 
)
private

Definition at line 12136 of file maglev-graph-builder.cc.

12137  {
12138  compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(closure);
12139  if (!maybe_constant.has_value()) return {};
12140  if (!maybe_constant->IsJSFunction()) return {};
12141  compiler::JSFunctionRef function = maybe_constant->AsJSFunction();
12142  if (!function.has_initial_map(broker())) return {};
12143 
12144  // Create the register file.
12145  compiler::SharedFunctionInfoRef shared = function.shared(broker());
12146  DCHECK(shared.HasBytecodeArray());
12147  compiler::BytecodeArrayRef bytecode_array = shared.GetBytecodeArray(broker());
12148  int parameter_count_no_receiver = bytecode_array.parameter_count() - 1;
12149  int length = parameter_count_no_receiver + bytecode_array.register_count();
12151  return {};
12152  }
12153  auto undefined = GetRootConstant(RootIndex::kUndefinedValue);
12154  VirtualObject* register_file =
12155  CreateFixedArray(broker()->fixed_array_map(), length);
12156  for (int i = 0; i < length; i++) {
12157  register_file->set(FixedArray::OffsetOfElementAt(i), undefined);
12158  }
12159 
12160  // Create the JS[Async]GeneratorObject instance.
12161  compiler::SlackTrackingPrediction slack_tracking_prediction =
12163  function);
12164  compiler::MapRef initial_map = function.initial_map(broker());
12165  VirtualObject* generator = CreateJSGeneratorObject(
12166  initial_map, slack_tracking_prediction.instance_size(), GetContext(),
12167  closure, receiver, register_file);
12168 
12169  // Handle in-object properties.
12170  for (int i = 0; i < slack_tracking_prediction.inobject_property_count();
12171  i++) {
12172  generator->set(initial_map.GetInObjectPropertyOffset(i), undefined);
12173  }
12174  generator->ClearSlots(
12175  initial_map.GetInObjectPropertyOffset(
12176  slack_tracking_prediction.inobject_property_count()),
12177  GetRootConstant(RootIndex::kOnePointerFillerMap));
12178 
12179  ValueNode* allocation =
12181  return allocation;
12182 }
VirtualObject * CreateJSGeneratorObject(compiler::MapRef map, int instance_size, ValueNode *context, ValueNode *closure, ValueNode *receiver, ValueNode *register_file)

References broker(), v8::internal::maglev::VirtualObject::ClearSlots(), v8::internal::DCHECK(), v8::internal::compiler::SharedFunctionInfoRef::GetBytecodeArray(), v8::internal::compiler::initial_map, v8::internal::compiler::SlackTrackingPrediction::inobject_property_count(), v8::internal::compiler::SlackTrackingPrediction::instance_size(), v8::internal::kMaxRegularHeapObjectSize, v8::internal::kYoung, v8::internal::length, v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), v8::internal::compiler::BytecodeArrayRef::parameter_count(), v8::internal::compiler::BytecodeArrayRef::register_count(), v8::internal::maglev::VirtualObject::set(), v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::SizeFor(), v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), and v8::internal::undefined.

+ Here is the call graph for this function:

◆ TryBuildCallKnownApiFunction()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildCallKnownApiFunction ( compiler::JSFunctionRef  function,
compiler::SharedFunctionInfoRef  shared,
CallArguments args 
)
private

Definition at line 11025 of file maglev-graph-builder.cc.

11027  {
11028  compiler::OptionalFunctionTemplateInfoRef maybe_function_template_info =
11029  shared.function_template_info(broker());
11030  if (!maybe_function_template_info.has_value()) {
11031  // Not an Api function.
11032  return {};
11033  }
11034 
11035  // See if we can optimize this API call.
11036  compiler::FunctionTemplateInfoRef function_template_info =
11037  maybe_function_template_info.value();
11038 
11039  compiler::HolderLookupResult api_holder;
11040  if (function_template_info.accept_any_receiver() &&
11041  function_template_info.is_signature_undefined(broker())) {
11042  // We might be able to optimize the API call depending on the
11043  // {function_template_info}.
11044  // If the API function accepts any kind of {receiver}, we only need to
11045  // ensure that the {receiver} is actually a JSReceiver at this point,
11046  // and also pass that as the {holder}. There are two independent bits
11047  // here:
11048  //
11049  // a. When the "accept any receiver" bit is set, it means we don't
11050  // need to perform access checks, even if the {receiver}'s map
11051  // has the "needs access check" bit set.
11052  // b. When the {function_template_info} has no signature, we don't
11053  // need to do the compatible receiver check, since all receivers
11054  // are considered compatible at that point, and the {receiver}
 11055  // will be passed as the {holder}.
11056 
11057  api_holder =
11058  compiler::HolderLookupResult{CallOptimization::kHolderIsReceiver};
11059  } else {
11060  // Try to infer API holder from the known aspects of the {receiver}.
11061  api_holder =
11062  TryInferApiHolderValue(function_template_info, args.receiver());
11063  }
11064 
11065  switch (api_holder.lookup) {
11068  return TryReduceCallForApiFunction(function_template_info, shared, args);
11069 
11071  break;
11072  }
11073 
11074  // We don't have enough information to eliminate the access check
11075  // and/or the compatible receiver check, so use the generic builtin
11076  // that does those checks dynamically. This is still significantly
11077  // faster than the generic call sequence.
11078  Builtin builtin_name;
11079  // TODO(ishell): create no-profiling versions of kCallFunctionTemplate
11080  // builtins and use them here based on DependOnNoProfilingProtector()
11081  // dependency state.
11082  if (function_template_info.accept_any_receiver()) {
11083  DCHECK(!function_template_info.is_signature_undefined(broker()));
11084  builtin_name = Builtin::kCallFunctionTemplate_CheckCompatibleReceiver;
11085  } else if (function_template_info.is_signature_undefined(broker())) {
11086  builtin_name = Builtin::kCallFunctionTemplate_CheckAccess;
11087  } else {
11088  builtin_name =
11089  Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver;
11090  }
11091 
11092  // The CallFunctionTemplate builtin requires the {receiver} to be
11093  // an actual JSReceiver, so make sure we do the proper conversion
11094  // first if necessary.
11095  ValueNode* receiver = GetConvertReceiver(shared, args);
11096  int kContext = 1;
11097  int kFunctionTemplateInfo = 1;
11098  int kArgc = 1;
11099  return AddNewNode<CallBuiltin>(
11100  kFunctionTemplateInfo + kArgc + kContext + args.count_with_receiver(),
11101  [&](CallBuiltin* call_builtin) {
11102  int arg_index = 0;
11103  call_builtin->set_arg(arg_index++, GetConstant(function_template_info));
11104  call_builtin->set_arg(
11105  arg_index++,
11106  GetInt32Constant(JSParameterCount(static_cast<int>(args.count()))));
11107 
11108  call_builtin->set_arg(arg_index++, GetTaggedValue(receiver));
11109  for (int i = 0; i < static_cast<int>(args.count()); i++) {
11110  call_builtin->set_arg(arg_index++, GetTaggedValue(args[i]));
11111  }
11112  },
11113  builtin_name, GetTaggedValue(GetContext()));
11114 }
MaybeReduceResult TryReduceCallForApiFunction(compiler::FunctionTemplateInfoRef api_callback, compiler::OptionalSharedFunctionInfoRef maybe_shared, CallArguments &args)
compiler::HolderLookupResult TryInferApiHolderValue(compiler::FunctionTemplateInfoRef function_template_info, ValueNode *receiver)

References v8::internal::compiler::FunctionTemplateInfoRef::accept_any_receiver(), v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::compiler::SharedFunctionInfoRef::function_template_info(), v8::internal::compiler::FunctionTemplateInfoRef::is_signature_undefined(), v8::internal::CallOptimization::kHolderFound, v8::internal::CallOptimization::kHolderIsReceiver, v8::internal::CallOptimization::kHolderNotFound, and v8::internal::compiler::HolderLookupResult::lookup.

+ Here is the call graph for this function:

◆ TryBuildCallKnownJSFunction() [1/2]

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildCallKnownJSFunction ( compiler::JSFunctionRef  function,
ValueNode new_target,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11116 of file maglev-graph-builder.cc.

11118  {
11119  // Don't inline CallFunction stub across native contexts.
11120  if (function.native_context(broker()) != broker()->target_native_context()) {
11121  return {};
11122  }
11123  compiler::SharedFunctionInfoRef shared = function.shared(broker());
11124  RETURN_IF_DONE(TryBuildCallKnownApiFunction(function, shared, args));
11125 
11126  ValueNode* closure = GetConstant(function);
11127  compiler::ContextRef context = function.context(broker());
11128  ValueNode* context_node = GetConstant(context);
11129  MaybeReduceResult res;
11130  if (MaglevIsTopTier() && TargetIsCurrentCompilingUnit(function) &&
11131  !graph_->is_osr()) {
11132  DCHECK(!shared.HasBuiltinId());
11133  res = BuildCallSelf(context_node, closure, new_target, shared, args);
11134  } else {
11136  context_node, closure, new_target,
11137 #ifdef V8_ENABLE_LEAPTIERING
11138  function.dispatch_handle(),
11139 #endif
11140  shared, function.raw_feedback_cell(broker()), args, feedback_source);
11141  }
11142  return res;
11143 }
bool TargetIsCurrentCompilingUnit(compiler::JSFunctionRef target)
MaybeReduceResult TryBuildCallKnownApiFunction(compiler::JSFunctionRef function, compiler::SharedFunctionInfoRef shared, CallArguments &args)
ValueNode * BuildCallSelf(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, CallArguments &args)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::native_context, v8::internal::raw_feedback_cell, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryBuildCallKnownJSFunction() [2/2]

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildCallKnownJSFunction ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
compiler::FeedbackCellRef  feedback_cell,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11196 of file maglev-graph-builder.cc.

11203  {
11204  if (v8_flags.maglev_inlining) {
11205  RETURN_IF_DONE(TryBuildInlineCall(context, function, new_target,
11206 #ifdef V8_ENABLE_LEAPTIERING
11207  dispatch_handle,
11208 #endif
11209  shared, feedback_cell, args,
11210  feedback_source));
11211  }
11212  return BuildCallKnownJSFunction(context, function, new_target,
11213 #ifdef V8_ENABLE_LEAPTIERING
11214  dispatch_handle,
11215 #endif
11216  shared, feedback_cell, args, feedback_source);
11217 }
CallKnownJSFunction * BuildCallKnownJSFunction(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryBuildInlineCall(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, const compiler::FeedbackSource &feedback_source)

References v8::base::args, RETURN_IF_DONE, and v8::internal::v8_flags.

◆ TryBuildCheckInt32Condition()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildCheckInt32Condition ( ValueNode lhs,
ValueNode rhs,
AssertCondition  condition,
DeoptimizeReason  reason,
bool  allow_unconditional_deopt = true 
)
private

Definition at line 6266 of file maglev-graph-builder.cc.

6268  {
6269  auto lhs_const = TryGetInt32Constant(lhs);
6270  if (lhs_const) {
6271  auto rhs_const = TryGetInt32Constant(rhs);
6272  if (rhs_const) {
6273  if (CheckConditionIn32(lhs_const.value(), rhs_const.value(), condition)) {
6274  return ReduceResult::Done();
6275  }
6276  if (allow_unconditional_deopt) {
6277  return EmitUnconditionalDeopt(reason);
6278  }
6279  }
6280  }
6281  AddNewNode<CheckInt32Condition>({lhs, rhs}, condition, reason);
6282  return ReduceResult::Done();
6283 }
bool CheckConditionIn32(int32_t lhs, int32_t rhs, AssertCondition condition)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::CheckConditionIn32().

Referenced by TryBuildNewConsString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildElementAccess()

template<typename GenericAccessFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementAccess ( ValueNode object,
ValueNode index,
compiler::ElementAccessFeedback const &  feedback,
compiler::FeedbackSource const &  feedback_source,
GenericAccessFunc &&  build_generic_access 
)
private

Definition at line 6772 of file maglev-graph-builder.cc.

6776  {
6777  const compiler::KeyedAccessMode& keyed_mode = feedback.keyed_mode();
6778  // Check for the megamorphic case.
6779  if (feedback.transition_groups().empty()) {
6780  if (keyed_mode.access_mode() == compiler::AccessMode::kLoad) {
6781  return BuildCallBuiltin<Builtin::kKeyedLoadIC_Megamorphic>(
6782  {GetTaggedValue(object), GetTaggedValue(index_object)},
6783  feedback_source);
6784  } else if (keyed_mode.access_mode() == compiler::AccessMode::kStore) {
6785  return BuildCallBuiltin<Builtin::kKeyedStoreIC_Megamorphic>(
6786  {GetTaggedValue(object), GetTaggedValue(index_object),
6788  feedback_source);
6789  }
6790  return {};
6791  }
6792 
6793  NodeInfo* object_info = known_node_aspects().TryGetInfoFor(object);
6794  compiler::ElementAccessFeedback refined_feedback =
6795  object_info && object_info->possible_maps_are_known()
6796  ? feedback.Refine(broker(), object_info->possible_maps())
6797  : feedback;
6798 
6799  if (refined_feedback.HasOnlyStringMaps(broker())) {
6800  return TryBuildElementAccessOnString(object, index_object, refined_feedback,
6801  keyed_mode);
6802  }
6803 
6804  compiler::AccessInfoFactory access_info_factory(broker(), zone());
6805  ZoneVector<compiler::ElementAccessInfo> access_infos(zone());
6806  if (!access_info_factory.ComputeElementAccessInfos(refined_feedback,
6807  &access_infos) ||
6808  access_infos.empty()) {
6809  return {};
6810  }
6811 
6812  // TODO(leszeks): This is copied without changes from TurboFan's native
6813  // context specialization. We should figure out a way to share this code.
6814  //
6815  // For holey stores or growing stores, we need to check that the prototype
6816  // chain contains no setters for elements, and we need to guard those checks
6817  // via code dependencies on the relevant prototype maps.
6818  if (keyed_mode.access_mode() == compiler::AccessMode::kStore) {
6819  // TODO(v8:7700): We could have a fast path here, that checks for the
6820  // common case of Array or Object prototype only and therefore avoids
6821  // the zone allocation of this vector.
6822  ZoneVector<compiler::MapRef> prototype_maps(zone());
6823  for (compiler::ElementAccessInfo const& access_info : access_infos) {
6824  for (compiler::MapRef receiver_map :
6825  access_info.lookup_start_object_maps()) {
6826  // If the {receiver_map} has a prototype and its elements backing
6827  // store is either holey, or we have a potentially growing store,
 6828  // then we need to check that all prototypes have stable maps
 6829  // with no element accessors and no throwing behavior for elements (and
6830  // we need to guard against changes to that below).
6831  if ((IsHoleyOrDictionaryElementsKind(receiver_map.elements_kind()) ||
6832  StoreModeCanGrow(refined_feedback.keyed_mode().store_mode())) &&
6833  !receiver_map.PrototypesElementsDoNotHaveAccessorsOrThrow(
6834  broker(), &prototype_maps)) {
6835  return {};
6836  }
6837 
6838  // TODO(v8:12547): Support writing to objects in shared space, which
6839  // need a write barrier that calls Object::Share to ensure the RHS is
6840  // shared.
6841  if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(
6842  receiver_map.instance_type())) {
6843  return {};
6844  }
6845  }
6846  }
6847  for (compiler::MapRef prototype_map : prototype_maps) {
6848  broker()->dependencies()->DependOnStableMap(prototype_map);
6849  }
6850  }
6851 
6852  // Check for monomorphic case.
6853  if (access_infos.size() == 1) {
6854  compiler::ElementAccessInfo const& access_info = access_infos.front();
6855  // TODO(victorgomes): Support RAB/GSAB backed typed arrays.
6856  if (IsRabGsabTypedArrayElementsKind(access_info.elements_kind())) {
6857  return {};
6858  }
6859 
6860  if (!access_info.transition_sources().empty()) {
6861  compiler::MapRef transition_target =
6862  access_info.lookup_start_object_maps().front();
6863  const ZoneVector<compiler::MapRef>& transition_sources =
6864  access_info.transition_sources();
6865 
6866  // There are no transitions in heap number maps. If `object` is a SMI, we
6867  // would anyway fail the transition and deopt later.
6868  DCHECK_NE(transition_target.instance_type(),
6869  InstanceType::HEAP_NUMBER_TYPE);
6870 #ifdef DEBUG
6871  for (auto& transition_source : transition_sources) {
6872  DCHECK_NE(transition_source.instance_type(),
6873  InstanceType::HEAP_NUMBER_TYPE);
6874  }
6875 #endif // DEBUG
6876 
6878  ValueNode* object_map =
6880 
6882  object, object_map, transition_sources, transition_target));
6883  } else {
6885  object, base::VectorOf(access_info.lookup_start_object_maps())));
6886  }
6887  if (IsTypedArrayElementsKind(access_info.elements_kind())) {
6888  return TryBuildElementAccessOnTypedArray(object, index_object,
6889  access_info, keyed_mode);
6890  }
6891  return TryBuildElementAccessOnJSArrayOrJSObject(object, index_object,
6892  access_info, keyed_mode);
6893  } else {
6894  return TryBuildPolymorphicElementAccess(object, index_object, keyed_mode,
6895  access_infos, build_generic_access);
6896  }
6897 }
MaybeReduceResult TryBuildPolymorphicElementAccess(ValueNode *object, ValueNode *index, const compiler::KeyedAccessMode &keyed_mode, const ZoneVector< compiler::ElementAccessInfo > &access_infos, GenericAccessFunc &&build_generic_access)
MaybeReduceResult TryBuildElementAccessOnTypedArray(ValueNode *object, ValueNode *index, const compiler::ElementAccessInfo &access_info, compiler::KeyedAccessMode const &keyed_mode)
MaybeReduceResult TryBuildElementAccessOnJSArrayOrJSObject(ValueNode *object, ValueNode *index, const compiler::ElementAccessInfo &access_info, compiler::KeyedAccessMode const &keyed_mode)
MaybeReduceResult TryBuildElementAccessOnString(ValueNode *object, ValueNode *index, const compiler::ElementAccessFeedback &access_info, compiler::KeyedAccessMode const &keyed_mode)
ReduceResult BuildTransitionElementsKindOrCheckMap(ValueNode *heap_object, ValueNode *object_map, const ZoneVector< compiler::MapRef > &transition_sources, compiler::MapRef transition_target)
ReduceResult BuildCheckMaps(ValueNode *object, base::Vector< const compiler::MapRef > maps, std::optional< ValueNode * > map={}, bool has_deprecated_map_without_migration_target=false, bool migration_done_outside=false)
ReduceResult BuildCheckHeapObject(ValueNode *object)
constexpr Vector< T > VectorOf(T *start, size_t size)
Definition: vector.h:359
constexpr bool IsTypedArrayElementsKind(ElementsKind kind)
bool StoreModeCanGrow(KeyedAccessStoreMode store_mode)
Definition: globals.h:2760
bool IsHoleyOrDictionaryElementsKind(ElementsKind kind)

References v8::internal::compiler::KeyedAccessMode::access_mode(), broker(), v8::internal::compiler::AccessInfoFactory::ComputeElementAccessInfos(), DCHECK_NE, v8::internal::compiler::ElementAccessInfo::elements_kind(), v8::internal::ZoneVector< T >::empty(), v8::internal::ZoneVector< T >::front(), v8::internal::compiler::ElementAccessFeedback::HasOnlyStringMaps(), v8::internal::compiler::MapRef::instance_type(), v8::internal::IsHoleyOrDictionaryElementsKind(), v8::internal::IsRabGsabTypedArrayElementsKind(), v8::internal::IsTypedArrayElementsKind(), v8::internal::compiler::ElementAccessFeedback::keyed_mode(), v8::internal::compiler::kLoad, v8::internal::HeapObject::kMapOffset, v8::internal::compiler::kStore, v8::internal::compiler::ElementAccessInfo::lookup_start_object_maps(), v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), v8::internal::compiler::ElementAccessFeedback::Refine(), RETURN_IF_ABORT, v8::internal::ZoneVector< T >::size(), v8::internal::compiler::KeyedAccessMode::store_mode(), v8::internal::StoreModeCanGrow(), v8::internal::compiler::ElementAccessFeedback::transition_groups(), v8::internal::compiler::ElementAccessInfo::transition_sources(), and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildElementAccessOnJSArrayOrJSObject()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementAccessOnJSArrayOrJSObject ( ValueNode object,
ValueNode index,
const compiler::ElementAccessInfo access_info,
compiler::KeyedAccessMode const &  keyed_mode 
)
private

Definition at line 6739 of file maglev-graph-builder.cc.

6742  {
6743  if (!IsFastElementsKind(access_info.elements_kind())) {
6744  return {};
6745  }
6746  if (access_info.is_proxy_on_prototype()) {
6747  // TODO(jkummerow): Implement support.
6748  return {};
6749  }
6750  switch (keyed_mode.access_mode()) {
6753  object, index_object,
6754  base::VectorOf(access_info.lookup_start_object_maps()),
6755  access_info.elements_kind(), keyed_mode.load_mode());
6758  base::Vector<const compiler::MapRef> maps =
6759  base::VectorOf(access_info.lookup_start_object_maps());
6760  ElementsKind elements_kind = access_info.elements_kind();
6761  return TryBuildElementStoreOnJSArrayOrJSObject(object, index_object,
6762  GetAccumulator(), maps,
6763  elements_kind, keyed_mode);
6764  }
6765  default:
6766  // TODO(victorgomes): Implement more access types.
6767  return {};
6768  }
6769 }
MaybeReduceResult TryBuildElementLoadOnJSArrayOrJSObject(ValueNode *object, ValueNode *index, base::Vector< const compiler::MapRef > maps, ElementsKind kind, KeyedAccessLoadMode load_mode)
MaybeReduceResult TryBuildElementStoreOnJSArrayOrJSObject(ValueNode *object, ValueNode *index_object, ValueNode *value, base::Vector< const compiler::MapRef > maps, ElementsKind kind, const compiler::KeyedAccessMode &keyed_mode)
constexpr bool IsFastElementsKind(ElementsKind kind)

References v8::internal::compiler::KeyedAccessMode::access_mode(), v8::internal::compiler::ElementAccessInfo::elements_kind(), v8::internal::compiler::ElementAccessInfo::is_proxy_on_prototype(), v8::internal::IsFastElementsKind(), v8::internal::compiler::kLoad, v8::internal::compiler::kStore, v8::internal::compiler::kStoreInLiteral, v8::internal::compiler::KeyedAccessMode::load_mode(), v8::internal::compiler::ElementAccessInfo::lookup_start_object_maps(), and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildElementAccessOnString()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementAccessOnString ( ValueNode object,
ValueNode index,
const compiler::ElementAccessFeedback access_info,
compiler::KeyedAccessMode const &  keyed_mode 
)
private

Definition at line 6134 of file maglev-graph-builder.cc.

6137  {
6138  // Strings are immutable and `in` cannot be used on strings
6139  if (keyed_mode.access_mode() != compiler::AccessMode::kLoad) {
6140  return {};
6141  }
6142 
6143  RETURN_IF_DONE(TryReduceConstantStringAt(object, index_object,
6145 
6146  // Ensure that {object} is actually a String.
6147  bool is_one_byte_seq_string =
6148  v8_flags.specialize_code_for_one_byte_seq_strings &&
6149  base::all_of(access_feedback.transition_groups(), [](const auto& group) {
6150  return base::all_of(group, [](compiler::MapRef map) {
6151  return map.IsSeqStringMap() && map.IsOneByteStringMap();
6152  });
6153  });
6154  if (is_one_byte_seq_string) {
6156  } else {
6158  }
6159 
6160  ValueNode* length = BuildLoadStringLength(object);
6161  ValueNode* index = GetInt32ElementIndex(index_object);
6162  auto emit_load = [&]() -> ValueNode* {
6163  if (is_one_byte_seq_string) {
6164  return AddNewNode<SeqOneByteStringAt>({object, index});
6165  } else {
6166  return AddNewNode<StringAt>({object, index});
6167  }
6168  };
6169 
6170  if (LoadModeHandlesOOB(keyed_mode.load_mode()) &&
6171  broker()->dependencies()->DependOnNoElementsProtector()) {
6172  ValueNode* positive_index;
6174  ValueNode* uint32_length = AddNewNode<UnsafeInt32ToUint32>({length});
6175  return Select(
6176  [&](auto& builder) {
6178  positive_index, uint32_length);
6179  },
6180  emit_load, [&] { return GetRootConstant(RootIndex::kUndefinedValue); });
6181  } else {
6185  return emit_load();
6186  }
6187 }
BranchResult BuildBranchIfUint32Compare(BranchBuilder &builder, Operation op, ValueNode *lhs, ValueNode *rhs)
MaybeReduceResult TryBuildCheckInt32Condition(ValueNode *lhs, ValueNode *rhs, AssertCondition condition, DeoptimizeReason reason, bool allow_unconditional_deopt=true)
ValueNode * BuildLoadStringLength(ValueNode *string)
ReduceResult BuildCheckSeqOneByteString(ValueNode *object)
MaybeReduceResult TryReduceConstantStringAt(ValueNode *object, ValueNode *index, StringAtOOBMode oob_mode)
bool LoadModeHandlesOOB(KeyedAccessLoadMode load_mode)
Definition: globals.h:2707

References v8::internal::compiler::KeyedAccessMode::access_mode(), v8::base::all_of(), kElement, v8::internal::compiler::kLoad, RETURN_IF_DONE, v8::internal::compiler::ElementAccessFeedback::transition_groups(), TryReduceConstantStringAt(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ TryBuildElementAccessOnTypedArray()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementAccessOnTypedArray ( ValueNode object,
ValueNode index,
const compiler::ElementAccessInfo access_info,
compiler::KeyedAccessMode const &  keyed_mode 
)
private

Definition at line 6474 of file maglev-graph-builder.cc.

6477  {
6479  base::VectorOf(access_info.lookup_start_object_maps())));
6480  ElementsKind elements_kind = access_info.elements_kind();
6481  if (elements_kind == FLOAT16_ELEMENTS ||
6482  elements_kind == BIGUINT64_ELEMENTS ||
6483  elements_kind == BIGINT64_ELEMENTS) {
6484  return {};
6485  }
6486  if (keyed_mode.access_mode() == compiler::AccessMode::kLoad &&
6487  LoadModeHandlesOOB(keyed_mode.load_mode())) {
6488  // TODO(victorgomes): Handle OOB mode.
6489  return {};
6490  }
6491  if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
6492  StoreModeIgnoresTypeArrayOOB(keyed_mode.store_mode())) {
6493  // TODO(victorgomes): Handle OOB mode.
6494  return {};
6495  }
6496  if (keyed_mode.access_mode() == compiler::AccessMode::kStore &&
6497  elements_kind == UINT8_CLAMPED_ELEMENTS &&
6498  !IsSupported(CpuOperation::kFloat64Round)) {
 6499  // TODO(victorgomes): Technically we still support this if the value (in
 6500  // the accumulator) is of type int32. It would be nice to have a roll back
 6501  // mechanism instead, so that we do not need to check this early.
6502  return {};
6503  }
6504  if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
6505  // TODO(leszeks): Eliminate this check.
6506  AddNewNode<CheckTypedArrayNotDetached>({object});
6507  }
6508  ValueNode* index;
6509  ValueNode* length;
6511  GET_VALUE_OR_ABORT(length, BuildLoadTypedArrayLength(object, elements_kind));
6512  AddNewNode<CheckTypedArrayBounds>({index, length});
6513  switch (keyed_mode.access_mode()) {
6515  DCHECK(!LoadModeHandlesOOB(keyed_mode.load_mode()));
6516  if (auto constant = object->TryCast<Constant>()) {
6517  compiler::HeapObjectRef constant_object = constant->object();
6518  if (constant_object.IsJSTypedArray() &&
6519  constant_object.AsJSTypedArray().is_off_heap_non_rab_gsab(
6520  broker())) {
6522  constant_object.AsJSTypedArray(), index, elements_kind);
6523  }
6524  }
6525  return BuildLoadTypedArrayElement(object, index, elements_kind);
6527  DCHECK(StoreModeIsInBounds(keyed_mode.store_mode()));
6528  if (auto constant = object->TryCast<Constant>()) {
6529  compiler::HeapObjectRef constant_object = constant->object();
6530  if (constant_object.IsJSTypedArray() &&
6531  constant_object.AsJSTypedArray().is_off_heap_non_rab_gsab(
6532  broker())) {
6533  BuildStoreConstantTypedArrayElement(constant_object.AsJSTypedArray(),
6534  index, elements_kind);
6535  return ReduceResult::Done();
6536  }
6537  }
6538  BuildStoreTypedArrayElement(object, index, elements_kind);
6539  return ReduceResult::Done();
6541  // TODO(victorgomes): Implement has element access.
6542  return {};
6545  UNREACHABLE();
6546  }
6547 }
void BuildStoreConstantTypedArrayElement(compiler::JSTypedArrayRef typed_array, ValueNode *index, ElementsKind elements_kind)
ValueNode * BuildLoadTypedArrayElement(ValueNode *object, ValueNode *index, ElementsKind elements_kind)
void BuildStoreTypedArrayElement(ValueNode *object, ValueNode *index, ElementsKind elements_kind)
ValueNode * BuildLoadConstantTypedArrayElement(compiler::JSTypedArrayRef typed_array, ValueNode *index, ElementsKind elements_kind)
ReduceResult BuildLoadTypedArrayLength(ValueNode *object, ElementsKind elements_kind)
bool HasOnlyJSTypedArrayMaps(base::Vector< const compiler::MapRef > maps)
Definition: maglev-ir.h:1049
bool StoreModeIsInBounds(KeyedAccessStoreMode store_mode)
Definition: globals.h:2742
bool StoreModeIgnoresTypeArrayOOB(KeyedAccessStoreMode store_mode)
Definition: globals.h:2756

References v8::internal::compiler::KeyedAccessMode::access_mode(), broker(), v8::internal::DCHECK(), v8::internal::compiler::ElementAccessInfo::elements_kind(), GET_VALUE_OR_ABORT, v8::internal::maglev::HasOnlyJSTypedArrayMaps(), v8::internal::index, v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::IsSupported(), v8::internal::compiler::kDefine, v8::internal::compiler::kHas, v8::internal::compiler::kLoad, v8::internal::compiler::kStore, v8::internal::compiler::kStoreInLiteral, v8::internal::length, v8::internal::compiler::KeyedAccessMode::load_mode(), v8::internal::LoadModeHandlesOOB(), v8::internal::compiler::ElementAccessInfo::lookup_start_object_maps(), v8::internal::compiler::KeyedAccessMode::store_mode(), v8::internal::StoreModeIgnoresTypeArrayOOB(), v8::internal::StoreModeIsInBounds(), v8::internal::maglev::NodeBase::TryCast(), v8::internal::maglev::UINT8_CLAMPED_ELEMENTS, v8::internal::UNREACHABLE(), and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildElementLoadOnJSArrayOrJSObject()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementLoadOnJSArrayOrJSObject ( ValueNode object,
ValueNode index,
base::Vector< const compiler::MapRef maps,
ElementsKind  kind,
KeyedAccessLoadMode  load_mode 
)
private

Definition at line 6549 of file maglev-graph-builder.cc.

6552  {
6553  DCHECK(IsFastElementsKind(elements_kind));
6554  bool is_jsarray = HasOnlyJSArrayMaps(maps);
6555  DCHECK(is_jsarray || HasOnlyJSObjectMaps(maps));
6556 
6557  ValueNode* elements_array = BuildLoadElements(object);
6558  ValueNode* index = GetInt32ElementIndex(index_object);
6559  ValueNode* length = is_jsarray ? GetInt32(BuildLoadJSArrayLength(object))
6560  : BuildLoadFixedArrayLength(elements_array);
6561 
6562  auto emit_load = [&]() -> MaybeReduceResult {
6563  ValueNode* result;
6564  if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
6566  elements_array, index,
6567  CanTreatHoleAsUndefined(maps) && LoadModeHandlesHoles(load_mode));
6568  } else if (elements_kind == PACKED_DOUBLE_ELEMENTS) {
6569  result = BuildLoadFixedDoubleArrayElement(elements_array, index);
6570  } else {
6571  DCHECK(!IsDoubleElementsKind(elements_kind));
6572  result = BuildLoadFixedArrayElement(elements_array, index);
6573  if (IsHoleyElementsKind(elements_kind)) {
6574  if (CanTreatHoleAsUndefined(maps) && LoadModeHandlesHoles(load_mode)) {
6576  } else {
6578  if (IsSmiElementsKind(elements_kind)) {
6580  }
6581  }
6582  } else if (IsSmiElementsKind(elements_kind)) {
6584  }
6585  }
6586  return result;
6587  };
6588 
6589  if (CanTreatHoleAsUndefined(maps) && LoadModeHandlesOOB(load_mode)) {
6590  ValueNode* positive_index;
6592  ValueNode* uint32_length = AddNewNode<UnsafeInt32ToUint32>({length});
6593  return SelectReduction(
6594  [&](auto& builder) {
6596  positive_index, uint32_length);
6597  },
6598  emit_load, [&] { return GetRootConstant(RootIndex::kUndefinedValue); });
6599  } else {
6603  return emit_load();
6604  }
6605 }
ValueNode * BuildLoadFixedArrayLength(ValueNode *fixed_array)
bool CanTreatHoleAsUndefined(base::Vector< const compiler::MapRef > const &receiver_maps)
ValueNode * BuildLoadElements(ValueNode *object)
ValueNode * BuildLoadJSArrayLength(ValueNode *js_array, NodeType length_type=NodeType::kSmi)
ValueNode * BuildLoadHoleyFixedDoubleArrayElement(ValueNode *elements, ValueNode *index, bool convert_hole)
ReduceResult BuildCheckNotHole(ValueNode *node)
MaybeReduceResult SelectReduction(FCond cond, FTrue if_true, FFalse if_false)
ValueNode * BuildConvertHoleToUndefined(ValueNode *node)
bool HasOnlyJSArrayMaps(base::Vector< const compiler::MapRef > maps)
Definition: maglev-ir.h:1056
bool HasOnlyJSObjectMaps(base::Vector< const compiler::MapRef > maps)
Definition: maglev-ir.h:1063
constexpr bool IsHoleyElementsKind(ElementsKind kind)
bool LoadModeHandlesHoles(KeyedAccessLoadMode load_mode)
Definition: globals.h:2713

References v8::internal::DCHECK(), GET_VALUE_OR_ABORT, v8::internal::compiler::anonymous_namespace{js-native-context-specialization.cc}::HasOnlyJSArrayMaps(), v8::internal::maglev::HasOnlyJSObjectMaps(), v8::internal::HOLEY_DOUBLE_ELEMENTS, v8::internal::index, v8::internal::IsDoubleElementsKind(), v8::internal::IsFastElementsKind(), v8::internal::IsHoleyElementsKind(), v8::internal::IsSmiElementsKind(), v8::internal::kLessThan, v8::internal::wasm::anonymous_namespace{wasm-external-refs.cc}::kOutOfBounds, v8::internal::compiler::kSmi, v8::internal::kUnsignedLessThan, v8::internal::length, v8::internal::LoadModeHandlesHoles(), v8::internal::LoadModeHandlesOOB(), v8::internal::PACKED_DOUBLE_ELEMENTS, v8::base::internal::result, and RETURN_IF_ABORT.

+ Here is the call graph for this function:

◆ TryBuildElementStoreOnJSArrayOrJSObject()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildElementStoreOnJSArrayOrJSObject ( ValueNode object,
ValueNode index_object,
ValueNode value,
base::Vector< const compiler::MapRef maps,
ElementsKind  kind,
const compiler::KeyedAccessMode keyed_mode 
)
private

Definition at line 6626 of file maglev-graph-builder.cc.

6629  {
6630  DCHECK(IsFastElementsKind(elements_kind));
6631 
6632  const bool is_jsarray = HasOnlyJSArrayMaps(maps);
6633  DCHECK(is_jsarray || HasOnlyJSObjectMaps(maps));
6634 
6635  // Get the elements array.
6636  ValueNode* elements_array = BuildLoadElements(object);
6637  GET_VALUE_OR_ABORT(value, ConvertForStoring(value, elements_kind));
6638  ValueNode* index;
6639 
6640  // TODO(verwaest): Loop peeling will turn the first iteration index of spread
6641  // literals into smi constants as well, breaking the assumption that we'll
6642  // have preallocated the space if we see known indices. Turn off this
6643  // optimization if loop peeling is on.
6644  if (keyed_mode.access_mode() == compiler::AccessMode::kStoreInLiteral &&
6645  index_object->Is<SmiConstant>() && is_jsarray && !any_peeled_loop_) {
6646  index = GetInt32ElementIndex(index_object);
6647  } else {
6648  // Check boundaries.
6649  ValueNode* elements_array_length = nullptr;
6650  ValueNode* length;
6651  if (is_jsarray) {
6653  } else {
6654  length = elements_array_length =
6655  BuildLoadFixedArrayLength(elements_array);
6656  }
6657  index = GetInt32ElementIndex(index_object);
6658  if (keyed_mode.store_mode() == KeyedAccessStoreMode::kGrowAndHandleCOW) {
6659  if (elements_array_length == nullptr) {
6660  elements_array_length = BuildLoadFixedArrayLength(elements_array);
6661  }
6662 
6663  // Validate the {index} depending on holeyness:
6664  //
6665  // For HOLEY_*_ELEMENTS the {index} must not exceed the {elements}
6666  // backing store capacity plus the maximum allowed gap, as otherwise
6667  // the (potential) backing store growth would normalize and thus
6668  // the elements kind of the {receiver} would change to slow mode.
6669  //
6670  // For JSArray PACKED_*_ELEMENTS the {index} must be within the range
6671  // [0,length+1[ to be valid. In case {index} equals {length},
6672  // the {receiver} will be extended, but kept packed.
6673  //
6674  // Non-JSArray PACKED_*_ELEMENTS always grow by adding holes because they
6675  // lack the magical length property, which requires a map transition.
6676  // So we can assume that this did not happen if we did not see this map.
6677  ValueNode* limit =
6678  IsHoleyElementsKind(elements_kind)
6679  ? AddNewNode<Int32AddWithOverflow>(
6680  {elements_array_length,
6682  : is_jsarray
6683  ? AddNewNode<Int32AddWithOverflow>({length, GetInt32Constant(1)})
6684  : elements_array_length;
6688 
6689  // Grow backing store if necessary and handle COW.
6690  elements_array = AddNewNode<MaybeGrowFastElements>(
6691  {elements_array, object, index, elements_array_length},
6692  elements_kind);
6693 
6694  // If we didn't grow {elements}, it might still be COW, in which case we
6695  // copy it now.
6696  if (IsSmiOrObjectElementsKind(elements_kind)) {
6697  DCHECK_EQ(keyed_mode.store_mode(),
6699  elements_array =
6700  AddNewNode<EnsureWritableFastElements>({elements_array, object});
6701  }
6702 
6703  // Update length if necessary.
6704  if (is_jsarray) {
6705  ValueNode* new_length =
6706  AddNewNode<UpdateJSArrayLength>({length, object, index});
6707  RecordKnownProperty(object, broker()->length_string(), new_length,
6709  }
6710  } else {
6714 
6715  // Handle COW if needed.
6716  if (IsSmiOrObjectElementsKind(elements_kind)) {
6717  if (keyed_mode.store_mode() == KeyedAccessStoreMode::kHandleCOW) {
6718  elements_array =
6719  AddNewNode<EnsureWritableFastElements>({elements_array, object});
6720  } else {
6721  // Ensure that this is not a COW FixedArray.
6723  elements_array, base::VectorOf({broker()->fixed_array_map()})));
6724  }
6725  }
6726  }
6727  }
6728 
6729  // Do the store.
6730  if (IsDoubleElementsKind(elements_kind)) {
6731  BuildStoreFixedDoubleArrayElement(elements_array, index, value);
6732  } else {
6733  BuildStoreFixedArrayElement(elements_array, index, value);
6734  }
6735 
6736  return ReduceResult::Done();
6737 }
static const uint32_t kMaxGap
Definition: js-objects.h:939
void BuildStoreFixedArrayElement(ValueNode *elements, ValueNode *index, ValueNode *value)
ReduceResult ConvertForStoring(ValueNode *node, ElementsKind kind)
void BuildStoreFixedDoubleArrayElement(ValueNode *elements, ValueNode *index, ValueNode *value)
constexpr bool IsSmiOrObjectElementsKind(ElementsKind kind)

References v8::internal::compiler::KeyedAccessMode::access_mode(), broker(), v8::internal::DCHECK(), DCHECK_EQ, GET_VALUE_OR_ABORT, v8::internal::compiler::anonymous_namespace{js-native-context-specialization.cc}::HasOnlyJSArrayMaps(), v8::internal::maglev::HasOnlyJSObjectMaps(), v8::internal::index, v8::internal::maglev::NodeBase::Is(), v8::internal::IsDoubleElementsKind(), v8::internal::IsFastElementsKind(), v8::internal::IsHoleyElementsKind(), v8::internal::IsSmiOrObjectElementsKind(), v8::internal::kGrowAndHandleCOW, v8::internal::kHandleCOW, v8::internal::JSObject::kMaxGap, v8::internal::wasm::anonymous_namespace{wasm-external-refs.cc}::kOutOfBounds, v8::internal::compiler::kStore, v8::internal::compiler::kStoreInLiteral, v8::internal::kUnsignedLessThan, v8::internal::length, RETURN_IF_ABORT, v8::internal::compiler::KeyedAccessMode::store_mode(), v8::internal::value, and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildFastCreateObjectOrArrayLiteral()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildFastCreateObjectOrArrayLiteral ( const compiler::LiteralFeedback feedback)
private

Definition at line 13975 of file maglev-graph-builder.cc.

13976  {
13977  compiler::AllocationSiteRef site = feedback.value();
13978  if (!site.boilerplate(broker()).has_value()) return {};
13979  AllocationType allocation_type =
13981 
13982  // First try to extract out the shape and values of the boilerplate, bailing
13983  // out on complex boilerplates.
13984  int max_properties = compiler::kMaxFastLiteralProperties;
13985  std::optional<VirtualObject*> maybe_value = TryReadBoilerplateForFastLiteral(
13986  *site.boilerplate(broker()), allocation_type,
13987  compiler::kMaxFastLiteralDepth, &max_properties);
13988  if (!maybe_value.has_value()) return {};
13989 
13990  // Then, use the collected information to actually create nodes in the graph.
13991  // TODO(leszeks): Add support for unwinding graph modifications, so that we
13992  // can get rid of this two pass approach.
13994  MaybeReduceResult result =
13995  BuildInlinedAllocation(*maybe_value, allocation_type);
13996  return result;
13997 }
AllocationType DependOnPretenureMode(AllocationSiteRef site)
std::optional< VirtualObject * > TryReadBoilerplateForFastLiteral(compiler::JSObjectRef boilerplate, AllocationType allocation, int max_depth, int *max_properties)
const int kMaxFastLiteralDepth
Definition: globals.h:105
const int kMaxFastLiteralProperties
Definition: globals.h:106

References v8::internal::compiler::AllocationSiteRef::boilerplate(), broker(), v8::internal::compiler::kMaxFastLiteralDepth, v8::internal::compiler::kMaxFastLiteralProperties, v8::base::internal::result, and v8::internal::compiler::SingleValueFeedback< T, K >::value().

+ Here is the call graph for this function:

◆ TryBuildFastHasInPrototypeChain()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildFastHasInPrototypeChain ( ValueNode object,
compiler::HeapObjectRef  prototype 
)
private

Definition at line 12654 of file maglev-graph-builder.cc.

12655  {
12656  auto in_prototype_chain = InferHasInPrototypeChain(object, prototype);
12657  if (in_prototype_chain == kMayBeInPrototypeChain) return {};
12658 
12659  return GetBooleanConstant(in_prototype_chain == kIsInPrototypeChain);
12660 }
InferHasInPrototypeChainResult InferHasInPrototypeChain(ValueNode *receiver, compiler::HeapObjectRef prototype)

References v8::internal::prototype.

◆ TryBuildFastInstanceOf()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildFastInstanceOf ( ValueNode object,
compiler::JSObjectRef  callable_ref,
ValueNode callable_node 
)
private

Definition at line 12725 of file maglev-graph-builder.cc.

12727  {
12728  compiler::MapRef receiver_map = callable.map(broker());
12729  compiler::NameRef name = broker()->has_instance_symbol();
12730  compiler::PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
12731  receiver_map, name, compiler::AccessMode::kLoad);
12732 
12733  // TODO(v8:11457) Support dictionary mode holders here.
12734  if (access_info.IsInvalid() || access_info.HasDictionaryHolder()) {
12735  return {};
12736  }
12737  access_info.RecordDependencies(broker()->dependencies());
12738 
12739  if (access_info.IsNotFound()) {
12740  // If there's no @@hasInstance handler, the OrdinaryHasInstance operation
12741  // takes over, but that requires the constructor to be callable.
12742  if (!receiver_map.is_callable()) return {};
12743 
12745  access_info.lookup_start_object_maps(), kStartAtPrototype);
12746 
12747  // Monomorphic property access.
12748  if (callable_node_if_not_constant) {
12750  callable_node_if_not_constant,
12751  base::VectorOf(access_info.lookup_start_object_maps())));
12752  } else {
12753  // Even if we have a constant receiver, we still have to make sure its
12754  // map is correct, in case it migrates.
12755  if (receiver_map.is_stable()) {
12756  broker()->dependencies()->DependOnStableMap(receiver_map);
12757  } else {
12759  GetConstant(callable),
12760  base::VectorOf(access_info.lookup_start_object_maps())));
12761  }
12762  }
12763 
12764  return BuildOrdinaryHasInstance(object, callable,
12765  callable_node_if_not_constant);
12766  }
12767 
12768  if (access_info.IsFastDataConstant()) {
12769  compiler::OptionalJSObjectRef holder = access_info.holder();
12770  bool found_on_proto = holder.has_value();
12771  compiler::JSObjectRef holder_ref =
12772  found_on_proto ? holder.value() : callable;
12773  if (access_info.field_representation().IsDouble()) return {};
12774  compiler::OptionalObjectRef has_instance_field =
12775  holder_ref.GetOwnFastConstantDataProperty(
12776  broker(), access_info.field_representation(),
12777  access_info.field_index(), broker()->dependencies());
12778  if (!has_instance_field.has_value() ||
12779  !has_instance_field->IsHeapObject() ||
12780  !has_instance_field->AsHeapObject().map(broker()).is_callable()) {
12781  return {};
12782  }
12783 
12784  if (found_on_proto) {
12786  access_info.lookup_start_object_maps(), kStartAtPrototype,
12787  holder.value());
12788  }
12789 
12790  ValueNode* callable_node;
12791  if (callable_node_if_not_constant) {
12792  // Check that {callable_node_if_not_constant} is actually {callable}.
12794  BuildCheckValueByReference(callable_node_if_not_constant, callable,
12795  DeoptimizeReason::kWrongValue));
12796  callable_node = callable_node_if_not_constant;
12797  } else {
12798  callable_node = GetConstant(callable);
12799  }
12801  callable_node, base::VectorOf(access_info.lookup_start_object_maps())));
12802 
12803  // Special case the common case, where @@hasInstance is
12804  // Function.p.hasInstance. In this case we don't need to call ToBoolean (or
12805  // use the continuation), since OrdinaryHasInstance is guaranteed to return
12806  // a boolean.
12807  if (has_instance_field->IsJSFunction()) {
12808  compiler::SharedFunctionInfoRef shared =
12809  has_instance_field->AsJSFunction().shared(broker());
12810  if (shared.HasBuiltinId() &&
12811  shared.builtin_id() == Builtin::kFunctionPrototypeHasInstance) {
12812  return BuildOrdinaryHasInstance(object, callable,
12813  callable_node_if_not_constant);
12814  }
12815  }
12816 
12817  // Call @@hasInstance
12819  {callable_node, object});
12820  ValueNode* call_result;
12821  {
12822  // Make sure that a lazy deopt after the @@hasInstance call also performs
12823  // ToBoolean before returning to the interpreter.
12824  DeoptFrameScope continuation_scope(
12825  this, Builtin::kToBooleanLazyDeoptContinuation);
12826 
12827  if (has_instance_field->IsJSFunction()) {
12828  SaveCallSpeculationScope saved(this);
12830  call_result,
12831  TryReduceCallForConstant(has_instance_field->AsJSFunction(), args));
12832  } else {
12833  call_result = BuildGenericCall(GetConstant(*has_instance_field),
12835  }
12836  // TODO(victorgomes): Propagate the case if we need to soft deopt.
12837  }
12838 
12839  return BuildToBoolean(call_result);
12840  }
12841 
12842  return {};
12843 }
PropertyAccessInfo GetPropertyAccessInfo(MapRef map, NameRef name, AccessMode access_mode)
void RecordDependencies(CompilationDependencies *dependencies)
MaybeReduceResult TryReduceCallForConstant(compiler::JSFunctionRef target, CallArguments &args, const compiler::FeedbackSource &feedback_source=compiler::FeedbackSource())
ReduceResult BuildOrdinaryHasInstance(ValueNode *object, compiler::JSObjectRef callable, ValueNode *callable_node)

References v8::base::args, broker(), v8::internal::compiler::SharedFunctionInfoRef::builtin_id(), v8::internal::compiler::PropertyAccessInfo::field_index(), v8::internal::compiler::PropertyAccessInfo::field_representation(), GET_VALUE_OR_ABORT, v8::internal::compiler::JSObjectRef::GetOwnFastConstantDataProperty(), v8::internal::compiler::PropertyAccessInfo::HasDictionaryHolder(), v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::compiler::MapRef::is_callable(), v8::internal::compiler::MapRef::is_stable(), v8::internal::Representation::IsDouble(), v8::internal::compiler::PropertyAccessInfo::IsFastDataConstant(), v8::internal::compiler::PropertyAccessInfo::IsInvalid(), v8::internal::compiler::PropertyAccessInfo::IsNotFound(), v8::internal::compiler::kLoad, v8::internal::kNotNullOrUndefined, v8::internal::kStartAtPrototype, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::compiler::HeapObjectRef::map(), v8::internal::name, v8::internal::compiler::PropertyAccessInfo::RecordDependencies(), RETURN_IF_ABORT, and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildFastInstanceOfWithFeedback()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildFastInstanceOfWithFeedback ( ValueNode object,
ValueNode callable,
compiler::FeedbackSource  feedback_source 
)
private

Definition at line 12915 of file maglev-graph-builder.cc.

12917  {
12918  compiler::ProcessedFeedback const& feedback =
12919  broker()->GetFeedbackForInstanceOf(feedback_source);
12920 
12921  if (feedback.IsInsufficient()) {
12922  return EmitUnconditionalDeopt(
12923  DeoptimizeReason::kInsufficientTypeFeedbackForInstanceOf);
12924  }
12925 
12926  // Check if the right hand side is a known receiver, or
12927  // we have feedback from the InstanceOfIC.
12928  compiler::OptionalHeapObjectRef maybe_constant;
12929  if ((maybe_constant = TryGetConstant(callable)) &&
12930  maybe_constant.value().IsJSObject()) {
12931  compiler::JSObjectRef callable_ref = maybe_constant.value().AsJSObject();
12932  return TryBuildFastInstanceOf(object, callable_ref, nullptr);
12933  }
12934  if (feedback_source.IsValid()) {
12935  compiler::OptionalJSObjectRef callable_from_feedback =
12936  feedback.AsInstanceOf().value();
12937  if (callable_from_feedback) {
12938  return TryBuildFastInstanceOf(object, *callable_from_feedback, callable);
12939  }
12940  }
12941  return {};
12942 }
ProcessedFeedback const & GetFeedbackForInstanceOf(FeedbackSource const &source)
MaybeReduceResult TryBuildFastInstanceOf(ValueNode *object, compiler::JSObjectRef callable_ref, ValueNode *callable_node)

References v8::internal::compiler::ProcessedFeedback::AsInstanceOf(), broker(), v8::internal::compiler::ProcessedFeedback::IsInsufficient(), v8::internal::compiler::FeedbackSource::IsValid(), v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), and v8::internal::compiler::SingleValueFeedback< T, K >::value().

+ Here is the call graph for this function:

◆ TryBuildFastOrdinaryHasInstance()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildFastOrdinaryHasInstance ( ValueNode object,
compiler::JSObjectRef  callable,
ValueNode callable_node 
)
private

Definition at line 12668 of file maglev-graph-builder.cc.

12670  {
12671  const bool is_constant = callable_node_if_not_constant == nullptr;
12672  if (!is_constant) return {};
12673 
12674  if (callable.IsJSBoundFunction()) {
12675  // OrdinaryHasInstance on bound functions turns into a recursive
12676  // invocation of the instanceof operator again.
12677  compiler::JSBoundFunctionRef function = callable.AsJSBoundFunction();
12678  compiler::JSReceiverRef bound_target_function =
12679  function.bound_target_function(broker());
12680 
12681  if (bound_target_function.IsJSObject()) {
12683  object, bound_target_function.AsJSObject(), nullptr));
12684  }
12685 
12686  // If we can't build a fast instance-of, build a slow one with the
12687  // partial optimisation of using the bound target function constant.
12688  return BuildCallBuiltin<Builtin::kInstanceOf>(
12689  {GetTaggedValue(object), GetConstant(bound_target_function)});
12690  }
12691 
12692  if (callable.IsJSFunction()) {
12693  // Optimize if we currently know the "prototype" property.
12694  compiler::JSFunctionRef function = callable.AsJSFunction();
12695 
12696  // TODO(v8:7700): Remove the has_prototype_slot condition once the broker
12697  // is always enabled.
12698  if (!function.map(broker()).has_prototype_slot() ||
12699  !function.has_instance_prototype(broker()) ||
12700  function.PrototypeRequiresRuntimeLookup(broker())) {
12701  return {};
12702  }
12703 
12704  compiler::HeapObjectRef prototype =
12706  return BuildHasInPrototypeChain(object, prototype);
12707  }
12708 
12709  return {};
12710 }
HeapObjectRef DependOnPrototypeProperty(JSFunctionRef function)
ReduceResult BuildHasInPrototypeChain(ValueNode *object, compiler::HeapObjectRef prototype)

References broker(), v8::internal::prototype, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryBuildFindNonDefaultConstructorOrConstruct()

bool v8::internal::maglev::MaglevGraphBuilder::TryBuildFindNonDefaultConstructorOrConstruct ( ValueNode this_function,
ValueNode new_target,
std::pair< interpreter::Register, interpreter::Register result 
)
private

Definition at line 8217 of file maglev-graph-builder.cc.

8219  {
8220  // See also:
8221  // JSNativeContextSpecialization::ReduceJSFindNonDefaultConstructorOrConstruct
8222 
8223  compiler::OptionalHeapObjectRef maybe_constant =
8224  TryGetConstant(this_function);
8225  if (!maybe_constant) return false;
8226 
8227  compiler::MapRef function_map = maybe_constant->map(broker());
8228  compiler::HeapObjectRef current = function_map.prototype(broker());
8229 
8230  // TODO(v8:13091): Don't produce incomplete stack traces when debug is active.
8231  // We already deopt when a breakpoint is set. But it would be even nicer to
 8232  // avoid producing incomplete stack traces when debug is active, even if
8233  // there are no breakpoints - then a user inspecting stack traces via Dev
8234  // Tools would always see the full stack trace.
8235 
8236  while (true) {
8237  if (!current.IsJSFunction()) return false;
8238  compiler::JSFunctionRef current_function = current.AsJSFunction();
8239 
8240  // If there are class fields, bail out. TODO(v8:13091): Handle them here.
8241  if (current_function.shared(broker())
8243  return false;
8244  }
8245 
8246  // If there are private methods, bail out. TODO(v8:13091): Handle them here.
8247  if (current_function.context(broker())
8248  .scope_info(broker())
8249  .ClassScopeHasPrivateBrand()) {
8250  return false;
8251  }
8252 
8253  FunctionKind kind = current_function.shared(broker()).kind();
 8255  // The hierarchy walk will end here; this is the last chance to bail out
8256  // before creating new nodes.
8257  if (!broker()->dependencies()->DependOnArrayIteratorProtector()) {
8258  return false;
8259  }
8260 
8261  compiler::OptionalHeapObjectRef new_target_function =
8262  TryGetConstant(new_target);
8264  // Store the result register first, so that a lazy deopt in
8265  // `FastNewObject` writes `true` to this register.
8266  StoreRegister(result.first, GetBooleanConstant(true));
8267 
8268  ValueNode* object;
8269  if (new_target_function && new_target_function->IsJSFunction() &&
8270  HasValidInitialMap(new_target_function->AsJSFunction(),
8271  current_function)) {
8272  object = BuildInlinedAllocation(
8273  CreateJSConstructor(new_target_function->AsJSFunction()),
8275  } else {
8276  // We've already stored "true" into result.first, so a deopt here just
8277  // has to store result.second.
8278  LazyDeoptResultLocationScope new_location(this, result.second, 1);
8279  object = BuildCallBuiltin<Builtin::kFastNewObject>(
8280  {GetConstant(current_function), GetTaggedValue(new_target)});
8281  }
8282  StoreRegister(result.second, object);
8283  } else {
8284  StoreRegister(result.first, GetBooleanConstant(false));
8285  StoreRegister(result.second, GetConstant(current));
8286  }
8287 
8289  function_map, WhereToStart::kStartAtReceiver, current_function);
8290  return true;
8291  }
8292 
8293  // Keep walking up the class tree.
8294  current = current_function.map(broker()).prototype(broker());
8295  }
8296 }
void DependOnStablePrototypeChain(MapRef receiver_maps, WhereToStart start, OptionalJSObjectRef last_prototype=OptionalJSObjectRef())
VirtualObject * CreateJSConstructor(compiler::JSFunctionRef constructor)
bool HasValidInitialMap(compiler::JSFunctionRef new_target, compiler::JSFunctionRef constructor)
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit SharedFunctionInfo::MaglevCompilationFailedBit SharedFunctionInfo::FunctionSyntaxKindBits SharedFunctionInfo::HasDuplicateParametersBit requires_instance_members_initializer
@ kStartAtReceiver
Definition: globals.h:1729

References broker(), v8::internal::compiler::JSFunctionRef::context(), v8::internal::libvtune::function_map, v8::internal::kDefaultBaseConstructor, v8::internal::kDefaultDerivedConstructor, v8::internal::kStartAtReceiver, v8::internal::kYoung, v8::internal::compiler::HeapObjectRef::map(), v8::internal::compiler::MapRef::prototype(), v8::internal::requires_instance_members_initializer, v8::base::internal::result, v8::internal::compiler::JSFunctionRef::shared(), and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ TryBuildGetKeyedPropertyWithEnumeratedKey()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildGetKeyedPropertyWithEnumeratedKey ( ValueNode object,
const compiler::FeedbackSource feedback_source,
const compiler::ProcessedFeedback processed_feedback 
)
private

Definition at line 7574 of file maglev-graph-builder.cc.

7576  {
7577  if (current_for_in_state.index != nullptr &&
7580  bool speculating_receiver_map_matches = false;
7581  if (current_for_in_state.receiver != object) {
7582  // When the feedback is uninitialized, it is either a keyed load which
7583  // always hits the enum cache, or a keyed load that had never been
7584  // reached. In either case, we can check the map of the receiver and use
 7585  // the enum cache if the map matches the {cache_type}.
7586  if (processed_feedback.kind() !=
7588  return MaybeReduceResult::Fail();
7589  }
7590  if (BuildCheckHeapObject(object).IsDoneWithAbort()) {
7591  return ReduceResult::DoneWithAbort();
7592  }
7593  speculating_receiver_map_matches = true;
7594  }
7595 
7597  speculating_receiver_map_matches) {
7598  auto* receiver_map = BuildLoadTaggedField(object, HeapObject::kMapOffset);
7599  AddNewNode<CheckDynamicValue>(
7600  {receiver_map, current_for_in_state.cache_type},
7601  DeoptimizeReason::kWrongMapDynamic);
7602  if (current_for_in_state.receiver == object) {
7604  }
7605  }
7606  // TODO(leszeks): Cache the field index per iteration.
7607  auto* field_index = BuildLoadFixedArrayElement(
7610  AddNewNode<LoadTaggedFieldByFieldIndex>({object, field_index}));
7611  return ReduceResult::Done();
7612  }
7613  return MaybeReduceResult::Fail();
7614 }

References v8::internal::compiler::ProcessedFeedback::kind(), v8::internal::compiler::ProcessedFeedback::kInsufficient, and v8::internal::HeapObject::kMapOffset.

+ Here is the call graph for this function:

◆ TryBuildGlobalLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildGlobalLoad ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4274 of file maglev-graph-builder.cc.

4275  {
4276  if (global_access_feedback.IsScriptContextSlot()) {
4277  return TryBuildScriptContextLoad(global_access_feedback);
4278  } else if (global_access_feedback.IsPropertyCell()) {
4279  return TryBuildPropertyCellLoad(global_access_feedback);
4280  } else {
4281  DCHECK(global_access_feedback.IsMegamorphic());
4282  return {};
4283  }
4284 }
MaybeReduceResult TryBuildPropertyCellLoad(const compiler::GlobalAccessFeedback &global_access_feedback)
MaybeReduceResult TryBuildScriptContextLoad(const compiler::GlobalAccessFeedback &global_access_feedback)

References v8::internal::DCHECK(), v8::internal::compiler::GlobalAccessFeedback::IsMegamorphic(), v8::internal::compiler::GlobalAccessFeedback::IsPropertyCell(), v8::internal::compiler::GlobalAccessFeedback::IsScriptContextSlot(), TryBuildPropertyCellLoad(), and TryBuildScriptContextLoad().

+ Here is the call graph for this function:

◆ TryBuildGlobalStore()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildGlobalStore ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4262 of file maglev-graph-builder.cc.

4263  {
4264  if (global_access_feedback.IsScriptContextSlot()) {
4265  return TryBuildScriptContextStore(global_access_feedback);
4266  } else if (global_access_feedback.IsPropertyCell()) {
4267  return TryBuildPropertyCellStore(global_access_feedback);
4268  } else {
4269  DCHECK(global_access_feedback.IsMegamorphic());
4270  return {};
4271  }
4272 }
MaybeReduceResult TryBuildPropertyCellStore(const compiler::GlobalAccessFeedback &global_access_feedback)
MaybeReduceResult TryBuildScriptContextStore(const compiler::GlobalAccessFeedback &global_access_feedback)

References v8::internal::DCHECK(), v8::internal::compiler::GlobalAccessFeedback::IsMegamorphic(), v8::internal::compiler::GlobalAccessFeedback::IsPropertyCell(), v8::internal::compiler::GlobalAccessFeedback::IsScriptContextSlot(), TryBuildPropertyCellStore(), and TryBuildScriptContextStore().

+ Here is the call graph for this function:

◆ TryBuildInlineCall()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildInlineCall ( ValueNode context,
ValueNode function,
ValueNode new_target,
compiler::SharedFunctionInfoRef  shared,
compiler::FeedbackCellRef  feedback_cell,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 8505 of file maglev-graph-builder.cc.

8512  {
8514  if (!feedback_cell.feedback_vector(broker())) {
8515  // TODO(verwaest): Soft deopt instead?
8516  TRACE_CANNOT_INLINE("it has not been compiled/run with feedback yet");
8517  return {};
8518  }
8519 
8520  float feedback_frequency = 0.0f;
8521  if (feedback_source.IsValid()) {
8522  compiler::ProcessedFeedback const& feedback =
8523  broker()->GetFeedbackForCall(feedback_source);
8524  feedback_frequency =
8525  feedback.IsInsufficient() ? 0.0f : feedback.AsCall().frequency();
8526  }
8527  float call_frequency = feedback_frequency * GetCurrentCallFrequency();
8528 
8529  if (!CanInlineCall(shared, call_frequency)) return {};
8530  if (ShouldEagerInlineCall(shared)) {
8531  return BuildEagerInlineCall(context, function, new_target, shared,
8532  feedback_cell, args, call_frequency);
8533  }
8534 
8535  // Should we inline call?
8536  if (inlining_depth() > max_inline_depth()) {
8537  TRACE_CANNOT_INLINE("inlining depth (" << inlining_depth()
8538  << ") >= max-depth ("
8539  << max_inline_depth() << ")");
8540  return {};
8541  }
8542 
8543  compiler::BytecodeArrayRef bytecode = shared.GetBytecodeArray(broker());
8546  return BuildEagerInlineCall(context, function, new_target, shared,
8547  feedback_cell, args, call_frequency);
8548  }
8549 
8550  TRACE_INLINING(" considering " << shared << " for inlining");
8551  auto arguments = GetArgumentsAsArrayOfValueNodes(shared, args);
8552  auto generic_call = BuildCallKnownJSFunction(context, function, new_target,
8553 #ifdef V8_ENABLE_LEAPTIERING
8554  dispatch_handle,
8555 #endif
8556  shared, arguments);
8557 
8558  // Note: We point to the generic call exception handler instead of
8559  // jump_targets_ because the former contains a BasicBlockRef that is
8560  // guaranteed to be updated correctly upon exception block creation.
8561  // BuildLoopForPeeling might reset the BasicBlockRef in jump_targets_. If this
8562  // happens, inlined calls within the peeled loop would incorrectly point to
8563  // the loop's exception handler instead of the original call's.
8564  CatchBlockDetails catch_details = GetTryCatchBlockForNonEagerInlining(
8565  generic_call->exception_handler_info());
8566  catch_details.deopt_frame_distance++;
8567  float score = call_frequency / bytecode.length();
8568  MaglevCallSiteInfo* call_site = zone()->New<MaglevCallSiteInfo>(
8569  MaglevCallerDetails{
8570  arguments, &generic_call->lazy_deopt_info()->top_frame(),
8573  /* is_eager_inline */ false, call_frequency},
8574  generic_call, feedback_cell, score);
8575  graph()->inlineable_calls().push_back(call_site);
8576  return generic_call;
8577 }
ZoneVector< MaglevCallSiteInfo * > & inlineable_calls()
Definition: maglev-graph.h:144
void add_inlined_bytecode_size(int size)
Definition: maglev-graph.h:115
CatchBlockDetails GetTryCatchBlockForNonEagerInlining(ExceptionHandlerInfo *info)
ReduceResult BuildEagerInlineCall(ValueNode *context, ValueNode *function, ValueNode *new_target, compiler::SharedFunctionInfoRef shared, compiler::FeedbackCellRef feedback_cell, CallArguments &args, float call_frequency)
bool ShouldEagerInlineCall(compiler::SharedFunctionInfoRef shared)
bool CanInlineCall(compiler::SharedFunctionInfoRef shared, float call_frequency)
KnownNodeAspects * Clone(Zone *zone) const

References v8::base::args, v8::internal::compiler::ProcessedFeedback::AsCall(), broker(), DCHECK_EQ, v8::internal::maglev::CatchBlockDetails::deopt_frame_distance, v8::internal::compiler::FeedbackCellRef::feedback_vector(), v8::internal::compiler::CallFeedback::frequency(), graph(), v8::internal::compiler::ProcessedFeedback::IsInsufficient(), v8::internal::compiler::FeedbackSource::IsValid(), v8::internal::compiler::BytecodeArrayRef::length(), TRACE_CANNOT_INLINE, and TRACE_INLINING.

+ Here is the call graph for this function:

◆ TryBuildInlinedAllocatedContext()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildInlinedAllocatedContext ( compiler::MapRef  map,
compiler::ScopeInfoRef  scope,
int  context_length 
)
private

Definition at line 14097 of file maglev-graph-builder.cc.

14098  {
14099  const int kContextAllocationLimit = 16;
14100  if (context_length > kContextAllocationLimit) return {};
14101  DCHECK_GE(context_length, Context::MIN_CONTEXT_SLOTS);
14102  auto context = CreateContext(map, context_length, scope, GetContext());
14103  ValueNode* result = BuildInlinedAllocation(context, AllocationType::kYoung);
14104  return result;
14105 }
VirtualObject * CreateContext(compiler::MapRef map, int length, compiler::ScopeInfoRef scope_info, ValueNode *previous_context, std::optional< ValueNode * > extension={})

References DCHECK_GE, v8::internal::kYoung, v8::internal::Context::MIN_CONTEXT_SLOTS, and v8::base::internal::result.

◆ TryBuildLoadDataView()

template<typename LoadNode >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildLoadDataView ( const CallArguments args,
ExternalArrayType  type 
)
private

Definition at line 9676 of file maglev-graph-builder.cc.

9677  {
9678  if (!CanSpeculateCall()) return {};
9679  if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
9680  // TODO(victorgomes): Add checks whether the array has been detached.
9681  return {};
9682  }
9683  // TODO(victorgomes): Add data view to known types.
9684  ValueNode* receiver = GetValueOrUndefined(args.receiver());
9685  AddNewNode<CheckInstanceType>({receiver}, CheckType::kCheckHeapObject,
9686  JS_DATA_VIEW_TYPE, JS_DATA_VIEW_TYPE);
9687  // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
9688  ValueNode* offset =
9690  AddNewNode<CheckJSDataViewBounds>({receiver, offset}, type);
9691  ValueNode* is_little_endian = args[1] ? args[1] : GetBooleanConstant(false);
9692  return AddNewNode<LoadNode>({receiver, offset, is_little_endian}, type);
9693 }

References v8::base::args, broker(), and v8::internal::tracing::type.

+ Here is the call graph for this function:

◆ TryBuildLoadNamedProperty() [1/2]

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildLoadNamedProperty ( ValueNode receiver,
compiler::NameRef  name,
compiler::FeedbackSource feedback_source 
)
private

Definition at line 7509 of file maglev-graph-builder.cc.

7511  {
7512  auto build_generic_access = [this, &receiver, &name, &feedback_source]() {
7513  ValueNode* context = GetContext();
7514  return AddNewNode<LoadNamedGeneric>({context, receiver}, name,
7515  feedback_source);
7516  };
7517  return TryBuildLoadNamedProperty(receiver, receiver, name, feedback_source,
7518  build_generic_access);
7519 }
MaybeReduceResult TryBuildLoadNamedProperty(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::FeedbackSource &feedback_source, GenericAccessFunc &&build_generic_access)

References v8::internal::name.

◆ TryBuildLoadNamedProperty() [2/2]

template<typename GenericAccessFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildLoadNamedProperty ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NameRef  name,
compiler::FeedbackSource feedback_source,
GenericAccessFunc &&  build_generic_access 
)
private

Definition at line 7487 of file maglev-graph-builder.cc.

7490  {
7491  const compiler::ProcessedFeedback& processed_feedback =
7492  broker()->GetFeedbackForPropertyAccess(feedback_source,
7494  switch (processed_feedback.kind()) {
7496  return EmitUnconditionalDeopt(
7497  DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
7499  RETURN_IF_DONE(TryReuseKnownPropertyLoad(lookup_start_object, name));
7500  return TryBuildNamedAccess(
7501  receiver, lookup_start_object, processed_feedback.AsNamedAccess(),
7502  feedback_source, compiler::AccessMode::kLoad, build_generic_access);
7503  }
7504  default:
7505  return {};
7506  }
7507 }
ProcessedFeedback const & GetFeedbackForPropertyAccess(FeedbackSource const &source, AccessMode mode, OptionalNameRef static_name)

References v8::internal::compiler::ProcessedFeedback::AsNamedAccess(), broker(), v8::internal::compiler::ProcessedFeedback::kind(), v8::internal::compiler::ProcessedFeedback::kInsufficient, v8::internal::compiler::kLoad, v8::internal::compiler::ProcessedFeedback::kNamedAccess, v8::internal::name, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryBuildNamedAccess()

template<typename GenericAccessFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildNamedAccess ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NamedAccessFeedback const &  feedback,
compiler::FeedbackSource const &  feedback_source,
compiler::AccessMode  access_mode,
GenericAccessFunc &&  build_generic_access 
)
private

Definition at line 5922 of file maglev-graph-builder.cc.

5927  {
5928  compiler::ZoneRefSet<Map> inferred_maps;
5929 
5930  bool has_deprecated_map_without_migration_target = false;
5931  if (compiler::OptionalHeapObjectRef c = TryGetConstant(lookup_start_object)) {
5932  compiler::MapRef constant_map = c.value().map(broker());
5933  if (c.value().IsJSFunction() &&
5934  feedback.name().equals(broker()->prototype_string())) {
5935  compiler::JSFunctionRef function = c.value().AsJSFunction();
5936  if (!constant_map.has_prototype_slot() ||
5937  !function.has_instance_prototype(broker()) ||
5938  function.PrototypeRequiresRuntimeLookup(broker()) ||
5939  access_mode != compiler::AccessMode::kLoad) {
5940  return {};
5941  }
5942  compiler::HeapObjectRef prototype =
5944  return GetConstant(prototype);
5945  }
5946  inferred_maps = compiler::ZoneRefSet<Map>(constant_map);
5947  } else if (feedback.maps().empty()) {
5948  // The IC is megamorphic.
5949 
5950  // We can't do megamorphic loads for lookups where the lookup start isn't
5951  // the receiver (e.g. load from super).
5952  if (receiver != lookup_start_object) return {};
5953 
5954  // Use known possible maps if we have any.
5955  NodeInfo* object_info =
5956  known_node_aspects().TryGetInfoFor(lookup_start_object);
5957  if (object_info && object_info->possible_maps_are_known()) {
5958  inferred_maps = object_info->possible_maps();
5959  } else {
5960  // If we have no known maps, make the access megamorphic.
5961  switch (access_mode) {
5963  return BuildCallBuiltin<Builtin::kLoadIC_Megamorphic>(
5964  {GetTaggedValue(receiver), GetConstant(feedback.name())},
5965  feedback_source);
5967  return BuildCallBuiltin<Builtin::kStoreIC_Megamorphic>(
5968  {GetTaggedValue(receiver), GetConstant(feedback.name()),
5970  feedback_source);
5972  return {};
5975  UNREACHABLE();
5976  }
5977  }
5978  } else {
5979  // TODO(leszeks): This is doing duplicate work with BuildCheckMaps,
5980  // consider passing the merger into there.
5981  KnownMapsMerger merger(broker(), zone(), base::VectorOf(feedback.maps()));
5982  merger.IntersectWithKnownNodeAspects(lookup_start_object,
5983  known_node_aspects());
5984  inferred_maps = merger.intersect_set();
5985  has_deprecated_map_without_migration_target =
5986  feedback.has_deprecated_map_without_migration_target();
5987  }
5988 
5989  if (inferred_maps.is_empty()) {
5990  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
5991  }
5992 
5993  ZoneVector<compiler::PropertyAccessInfo> access_infos(zone());
5994  ZoneVector<compiler::PropertyAccessInfo> access_infos_for_feedback(zone());
5995 
5996  for (compiler::MapRef map : inferred_maps) {
5997  if (map.is_deprecated()) continue;
5998 
5999  // TODO(v8:12547): Support writing to objects in shared space, which
6000  // need a write barrier that calls Object::Share to ensure the RHS is
6001  // shared.
6002  if (InstanceTypeChecker::IsAlwaysSharedSpaceJSObject(map.instance_type()) &&
6003  access_mode == compiler::AccessMode::kStore) {
6004  return {};
6005  }
6006 
6007  compiler::PropertyAccessInfo access_info =
6008  broker()->GetPropertyAccessInfo(map, feedback.name(), access_mode);
6009  access_infos_for_feedback.push_back(access_info);
6010  }
6011 
6012  compiler::AccessInfoFactory access_info_factory(broker(), zone());
6013  if (!access_info_factory.FinalizePropertyAccessInfos(
6014  access_infos_for_feedback, access_mode, &access_infos)) {
6015  return {};
6016  }
6017 
6018  // Check for monomorphic case.
6019  if (access_infos.size() == 1) {
6020  compiler::PropertyAccessInfo const& access_info = access_infos.front();
6021  base::Vector<const compiler::MapRef> maps =
6022  base::VectorOf(access_info.lookup_start_object_maps());
6023  if (HasOnlyStringMaps(maps)) {
6024  // Check for string maps before checking if we need to do an access
6025  // check. Primitive strings always get the prototype from the native
6026  // context they're operated on, so they don't need the access check.
6027  if (v8_flags.specialize_code_for_one_byte_seq_strings &&
6028  base::all_of(maps, [](compiler::MapRef map) {
6029  return map.IsSeqStringMap() && map.IsOneByteStringMap();
6030  })) {
6031  RETURN_IF_ABORT(BuildCheckSeqOneByteString(lookup_start_object));
6032  } else {
6033  RETURN_IF_ABORT(BuildCheckString(lookup_start_object));
6034  }
6035  } else if (HasOnlyNumberMaps(maps)) {
6036  RETURN_IF_ABORT(BuildCheckNumber(lookup_start_object));
6037  } else {
6039  BuildCheckMaps(lookup_start_object, maps, {},
6040  has_deprecated_map_without_migration_target));
6041  }
6042 
6043  // Generate the actual property
6044  return TryBuildPropertyAccess(receiver, lookup_start_object,
6045  feedback.name(), access_info, access_mode);
6046  } else {
6047  // TODO(victorgomes): Unify control flow logic with
6048  // TryBuildPolymorphicElementAccess.
6050  receiver, lookup_start_object, feedback, access_mode, access_infos,
6051  build_generic_access);
6052  }
6053 }
ReduceResult BuildCheckNumber(ValueNode *object)
MaybeReduceResult TryBuildPolymorphicPropertyAccess(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NamedAccessFeedback const &feedback, compiler::AccessMode access_mode, const ZoneVector< compiler::PropertyAccessInfo > &access_infos, GenericAccessFunc &&build_generic_access)
MaybeReduceResult TryBuildPropertyAccess(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info, compiler::AccessMode access_mode)
const PossibleMaps & possible_maps() const
bool HasOnlyStringMaps(base::Vector< const compiler::MapRef > maps)
Definition: maglev-ir.h:1070
bool HasOnlyNumberMaps(base::Vector< const compiler::MapRef > maps)
Definition: maglev-ir.h:1077

References v8::base::all_of(), broker(), BuildCheckMaps(), BuildCheckNumber(), BuildCheckSeqOneByteString(), BuildCheckString(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnPrototypeProperty(), EmitUnconditionalDeopt(), feedback(), v8::internal::compiler::AccessInfoFactory::FinalizePropertyAccessInfos(), v8::internal::ZoneVector< T >::front(), GetAccumulator(), GetConstant(), v8::internal::compiler::JSHeapBroker::GetPropertyAccessInfo(), GetTaggedValue(), v8::internal::compiler::MapRef::has_prototype_slot(), v8::internal::maglev::HasOnlyNumberMaps(), v8::internal::maglev::HasOnlyStringMaps(), v8::internal::ZoneCompactSet< T >::is_empty(), v8::internal::compiler::kDefine, v8::internal::compiler::kHas, v8::internal::compiler::kLoad, known_node_aspects(), v8::internal::compiler::kStore, v8::internal::compiler::kStoreInLiteral, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::compiler::HeapObjectRef::map(), v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), v8::internal::prototype, v8::internal::ZoneVector< T >::push_back(), RETURN_IF_ABORT, v8::internal::ZoneVector< T >::size(), TryBuildPolymorphicPropertyAccess(), TryBuildPropertyAccess(), TryGetConstant(), v8::internal::maglev::KnownNodeAspects::TryGetInfoFor(), v8::internal::UNREACHABLE(), v8::internal::v8_flags, v8::base::VectorOf(), and zone().

+ Here is the call graph for this function:

◆ TryBuildNewConsString()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildNewConsString ( ValueNode left,
ValueNode right,
AllocationType  allocation_type = AllocationType::kYoung 
)
private

Definition at line 2833 of file maglev-graph-builder.cc.

2834  {
2835  // This optimization is also done by Turboshaft.
2836  if (is_turbolev()) {
2837  return ReduceResult::Fail();
2838  }
2839  if (!v8_flags.maglev_cons_string_elision) {
2840  return ReduceResult::Fail();
2841  }
2842 
2843  DCHECK(NodeTypeIs(GetType(left), NodeType::kString));
2844  DCHECK(NodeTypeIs(GetType(right), NodeType::kString));
2845 
2846  size_t left_min_length = StringLengthStaticLowerBound(left);
2847  size_t right_min_length = StringLengthStaticLowerBound(right);
2848  bool result_is_cons_string =
2849  left_min_length + right_min_length >= ConsString::kMinLength;
2850 
2851  // TODO(olivf): Support the fast case with a non-cons string fallback.
2852  if (!result_is_cons_string) {
2853  return MaybeReduceResult::Fail();
2854  }
2855 
2856  ValueNode* left_length = BuildLoadStringLength(left);
2857  ValueNode* right_length = BuildLoadStringLength(right);
2858 
2859  auto BuildConsString = [&]() {
2860  ValueNode* new_length;
2861  MaybeReduceResult folded =
2862  TryFoldInt32BinaryOperation<Operation::kAdd>(left_length, right_length);
2863  if (folded.HasValue()) {
2864  new_length = folded.value();
2865  } else {
2866  new_length =
2867  AddNewNode<Int32AddWithOverflow>({left_length, right_length});
2868  }
2869 
2870  // TODO(olivf): Add unconditional deopt support to the Select builder
 2871  // instead of disabling unconditional deopt here.
2872  MaybeReduceResult too_long = TryBuildCheckInt32Condition(
2873  new_length, GetInt32Constant(String::kMaxLength),
2875  DeoptimizeReason::kStringTooLarge,
2876  /* allow_unconditional_deopt */ false);
2877  CHECK(!too_long.IsDoneWithAbort());
2878 
2879  ValueNode* new_map = BuildNewConsStringMap(left, right);
2880  VirtualObject* cons_string =
2881  CreateConsString(new_map, new_length, left, right);
2882  ValueNode* allocation =
2883  BuildInlinedAllocation(cons_string, allocation_type);
2884 
2885  return allocation;
2886  };
2887 
2888  return Select(
2889  [&](auto& builder) {
2890  if (left_min_length > 0) return BranchResult::kAlwaysFalse;
2892  left_length, GetInt32Constant(0));
2893  },
2894  [&] { return right; },
2895  [&] {
2896  return Select(
2897  [&](auto& builder) {
2898  if (right_min_length > 0) return BranchResult::kAlwaysFalse;
2900  right_length,
2901  GetInt32Constant(0));
2902  },
2903  [&] { return left; }, [&] { return BuildConsString(); });
2904  });
2905 }
static const uint32_t kMaxLength
Definition: string.h:523
ValueNode * BuildNewConsStringMap(ValueNode *left, ValueNode *right)
VirtualObject * CreateConsString(ValueNode *map, ValueNode *length, ValueNode *first, ValueNode *second)

References BuildBranchIfInt32Compare(), BuildInlinedAllocation(), BuildLoadStringLength(), BuildNewConsStringMap(), CHECK, CreateConsString(), v8::internal::DCHECK(), v8::internal::maglev::MaybeReduceResult::Fail(), GetInt32Constant(), GetType(), v8::internal::maglev::MaybeReduceResult::HasValue(), is_turbolev(), v8::internal::maglev::MaybeReduceResult::IsDoneWithAbort(), kAlwaysFalse, v8::internal::kEqual, v8::internal::String::kMaxLength, v8::internal::ConsString::kMinLength, v8::internal::kUnsignedLessThanEqual, v8::internal::maglev::NodeTypeIs(), Select(), StringLengthStaticLowerBound(), TryBuildCheckInt32Condition(), v8::internal::v8_flags, and v8::internal::maglev::MaybeReduceResult::value().

Referenced by BuildStringConcat().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPolymorphicElementAccess()

template<typename GenericAccessFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPolymorphicElementAccess ( ValueNode object,
ValueNode index,
const compiler::KeyedAccessMode keyed_mode,
const ZoneVector< compiler::ElementAccessInfo > &  access_infos,
GenericAccessFunc &&  build_generic_access 
)
private

Definition at line 6900 of file maglev-graph-builder.cc.

6904  {
6905  if (keyed_mode.access_mode() == compiler::AccessMode::kLoad &&
6906  LoadModeHandlesOOB(keyed_mode.load_mode())) {
6907  // TODO(victorgomes): Handle OOB mode.
6908  return {};
6909  }
6910 
6911  const bool is_any_store = compiler::IsAnyStore(keyed_mode.access_mode());
6912  const int access_info_count = static_cast<int>(access_infos.size());
6913  // Stores don't return a value, so we don't need a variable for the result.
6914  MaglevSubGraphBuilder sub_graph(this, is_any_store ? 0 : 1);
6915  std::optional<MaglevSubGraphBuilder::Variable> ret_val;
6916  std::optional<MaglevSubGraphBuilder::Label> done;
6917  std::optional<MaglevSubGraphBuilder::Label> generic_access;
6918 
6920  ValueNode* object_map = BuildLoadTaggedField(object, HeapObject::kMapOffset);
6921 
6922  // TODO(pthier): We could do better here than just emitting code for each map,
6923  // as many different maps can produce the exact same code (e.g. TypedArray
6924  // access for Uint16/Uint32/Int16/Int32/...).
6925  for (int i = 0; i < access_info_count; i++) {
6926  compiler::ElementAccessInfo const& access_info = access_infos[i];
6927  std::optional<MaglevSubGraphBuilder::Label> check_next_map;
6928  const bool handle_transitions = !access_info.transition_sources().empty();
6929  MaybeReduceResult map_check_result;
6930  if (i == access_info_count - 1) {
6931  if (handle_transitions) {
6932  compiler::MapRef transition_target =
6933  access_info.lookup_start_object_maps().front();
6934  map_check_result = BuildTransitionElementsKindOrCheckMap(
6935  object, object_map, access_info.transition_sources(),
6936  transition_target);
6937  } else {
6938  map_check_result = BuildCheckMaps(
6939  object, base::VectorOf(access_info.lookup_start_object_maps()),
6940  object_map);
6941  }
6942  } else {
6943  if (handle_transitions) {
6944  compiler::MapRef transition_target =
6945  access_info.lookup_start_object_maps().front();
6946  map_check_result = BuildTransitionElementsKindAndCompareMaps(
6947  object, object_map, access_info.transition_sources(),
6948  transition_target, &sub_graph, check_next_map);
6949  } else {
6950  map_check_result = BuildCompareMaps(
6951  object, object_map,
6952  base::VectorOf(access_info.lookup_start_object_maps()), &sub_graph,
6953  check_next_map);
6954  }
6955  }
6956  if (map_check_result.IsDoneWithAbort()) {
6957  // We know from known possible maps that this branch is not reachable,
6958  // so don't emit any code for it.
6959  continue;
6960  }
6961  MaybeReduceResult result;
6962  // TODO(victorgomes): Support RAB/GSAB backed typed arrays.
6963  if (IsRabGsabTypedArrayElementsKind(access_info.elements_kind())) {
6965  } else if (IsTypedArrayElementsKind(access_info.elements_kind())) {
6966  result = TryBuildElementAccessOnTypedArray(object, index_object,
6967  access_info, keyed_mode);
6968  } else {
6970  object, index_object, access_info, keyed_mode);
6971  }
6972 
6973  switch (result.kind()) {
6976  DCHECK_EQ(result.HasValue(), !is_any_store);
6977  if (!done.has_value()) {
6978  // We initialize the label {done} lazily on the first possible path.
6979  // If no possible path exists, it is guaranteed that BuildCheckMaps
6980  // emitted an unconditional deopt and we return DoneWithAbort at the
6981  // end. We need one extra predecessor to jump from the generic case.
6982  const int possible_predecessors = access_info_count - i + 1;
6983  if (is_any_store) {
6984  done.emplace(&sub_graph, possible_predecessors);
6985  } else {
6986  ret_val.emplace(0);
6987  done.emplace(
6988  &sub_graph, possible_predecessors,
6989  std::initializer_list<MaglevSubGraphBuilder::Variable*>{
6990  &*ret_val});
6991  }
6992  }
6993  if (!is_any_store) {
6994  sub_graph.set(*ret_val, result.value());
6995  }
6996  sub_graph.Goto(&*done);
6997  break;
6999  if (!generic_access.has_value()) {
7000  // Conservatively assume that all remaining branches can go into the
7001  // generic path, as we have to initialize the predecessors upfront.
7002  // TODO(pthier): Find a better way to do that.
7003  generic_access.emplace(&sub_graph, access_info_count - i);
7004  }
7005  sub_graph.Goto(&*generic_access);
7006  break;
7008  break;
7009  }
7010  if (check_next_map.has_value()) {
7011  sub_graph.Bind(&*check_next_map);
7012  }
7013  }
7014  if (generic_access.has_value() &&
7015  !sub_graph.TrimPredecessorsAndBind(&*generic_access).IsDoneWithAbort()) {
7016  MaybeReduceResult generic_result = build_generic_access();
7017  DCHECK(generic_result.IsDone());
7018  DCHECK_EQ(generic_result.IsDoneWithValue(), !is_any_store);
7019  if (!done.has_value()) {
7020  return is_any_store ? ReduceResult::Done() : generic_result.value();
7021  }
7022  if (!is_any_store) {
7023  sub_graph.set(*ret_val, generic_result.value());
7024  }
7025  sub_graph.Goto(&*done);
7026  }
7027  if (done.has_value()) {
7028  RETURN_IF_ABORT(sub_graph.TrimPredecessorsAndBind(&*done));
7029  return is_any_store ? ReduceResult::Done() : sub_graph.get(*ret_val);
7030  } else {
7031  return ReduceResult::DoneWithAbort();
7032  }
7033 }
ReduceResult BuildCompareMaps(ValueNode *heap_object, ValueNode *object_map, base::Vector< const compiler::MapRef > maps, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)
ReduceResult BuildTransitionElementsKindAndCompareMaps(ValueNode *heap_object, ValueNode *object_map, const ZoneVector< compiler::MapRef > &transition_sources, compiler::MapRef transition_target, MaglevSubGraphBuilder *sub_graph, std::optional< MaglevSubGraphBuilder::Label > &if_not_matched)

References v8::internal::compiler::KeyedAccessMode::access_mode(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), v8::internal::DCHECK(), DCHECK_EQ, v8::internal::compiler::ElementAccessInfo::elements_kind(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::get(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::compiler::IsAnyStore(), v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::maglev::MaybeReduceResult::IsDoneWithAbort(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), v8::internal::IsRabGsabTypedArrayElementsKind(), v8::internal::IsTypedArrayElementsKind(), v8::internal::compiler::kLoad, v8::internal::HeapObject::kMapOffset, v8::internal::compiler::KeyedAccessMode::load_mode(), v8::internal::LoadModeHandlesOOB(), v8::internal::compiler::ElementAccessInfo::lookup_start_object_maps(), v8::base::internal::result, RETURN_IF_ABORT, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::set(), v8::internal::ZoneVector< T >::size(), v8::internal::compiler::ElementAccessInfo::transition_sources(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::TrimPredecessorsAndBind(), v8::internal::maglev::MaybeReduceResult::value(), and v8::base::VectorOf().

+ Here is the call graph for this function:

◆ TryBuildPolymorphicPropertyAccess()

template<typename GenericAccessFunc >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPolymorphicPropertyAccess ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NamedAccessFeedback const &  feedback,
compiler::AccessMode  access_mode,
const ZoneVector< compiler::PropertyAccessInfo > &  access_infos,
GenericAccessFunc &&  build_generic_access 
)
private

Definition at line 7036 of file maglev-graph-builder.cc.

7041  {
7042  const bool is_any_store = compiler::IsAnyStore(access_mode);
7043  const int access_info_count = static_cast<int>(access_infos.size());
7044  int number_map_index_for_smi = -1;
7045 
7046  bool needs_migration = false;
7047  bool has_deprecated_map_without_migration_target =
7048  feedback.has_deprecated_map_without_migration_target();
7049  for (int i = 0; i < access_info_count; i++) {
7050  compiler::PropertyAccessInfo const& access_info = access_infos[i];
7051  DCHECK(!access_info.IsInvalid());
7052  for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
7053  if (map.is_migration_target()) {
7054  needs_migration = true;
7055  }
7056  if (map.IsHeapNumberMap()) {
7057  GetOrCreateInfoFor(lookup_start_object);
7058  base::SmallVector<compiler::MapRef, 1> known_maps = {map};
7059  KnownMapsMerger merger(broker(), zone(), base::VectorOf(known_maps));
7060  merger.IntersectWithKnownNodeAspects(lookup_start_object,
7061  known_node_aspects());
7062  if (!merger.intersect_set().is_empty() &&
7063  !IsEmptyNodeType(
7064  IntersectType(GetType(lookup_start_object), NodeType::kSmi))) {
7065  DCHECK_EQ(number_map_index_for_smi, -1);
7066  number_map_index_for_smi = i;
7067  }
7068  }
7069  }
7070  }
7071 
7072  // Stores don't return a value, so we don't need a variable for the result.
7073  MaglevSubGraphBuilder sub_graph(this, is_any_store ? 0 : 1);
7074  std::optional<MaglevSubGraphBuilder::Variable> ret_val;
7075  std::optional<MaglevSubGraphBuilder::Label> done;
7076  std::optional<MaglevSubGraphBuilder::Label> is_number;
7077  std::optional<MaglevSubGraphBuilder::Label> generic_access;
7078 
7079  if (number_map_index_for_smi >= 0) {
7080  is_number.emplace(&sub_graph, 2);
7081  sub_graph.GotoIfTrue<BranchIfSmi>(&*is_number, {lookup_start_object});
7082  } else {
7083  RETURN_IF_ABORT(BuildCheckHeapObject(lookup_start_object));
7084  }
7085  ValueNode* lookup_start_object_map =
7086  BuildLoadTaggedField(lookup_start_object, HeapObject::kMapOffset);
7087 
7088  if (needs_migration) {
7089  // TODO(marja, v8:7700): Try migrating only if all comparisons failed.
7090  // TODO(marja, v8:7700): Investigate making polymorphic map comparison (with
7091  // migration) a control node (like switch).
7092  lookup_start_object_map = AddNewNode<MigrateMapIfNeeded>(
7093  {lookup_start_object_map, lookup_start_object});
7094  }
7095 
7096  int start_offset = iterator_.current_offset();
7097  SourcePositionTableIterator::IndexAndPositionState
7098  start_source_position_iterator_state =
7100  std::optional<ContinuationOffsets> continuation;
7101  if (!is_any_store && !in_peeled_iteration()) {
7102  // TODO(marja): enable continuations inside peeled iterations. Predecessor
7103  // tracking (decremented_predecessor_offsets_ etc) needs to be adapted to
7104  // make that work.
7106  }
7107 
7108  for (int i = 0; i < access_info_count; i++) {
7109  // Reset the state before generating the next polymorphic arm, in case
7110  // FindContinuationForPolymorphicPropertyLoad or a continuation in the
7111  // previous arm changed it.
7112  iterator_.SetOffset(start_offset);
7114  start_source_position_iterator_state);
7115 
7116  compiler::PropertyAccessInfo const& access_info = access_infos[i];
7117  std::optional<MaglevSubGraphBuilder::Label> check_next_map;
7118  MaybeReduceResult map_check_result;
7119  const auto& maps = access_info.lookup_start_object_maps();
7120  if (i == access_info_count - 1) {
7121  map_check_result = BuildCheckMaps(
7122  lookup_start_object, base::VectorOf(maps), {},
7123  has_deprecated_map_without_migration_target, needs_migration);
7124  } else {
7125  map_check_result =
7126  BuildCompareMaps(lookup_start_object, lookup_start_object_map,
7127  base::VectorOf(maps), &sub_graph, check_next_map);
7128  }
7129  if (map_check_result.IsDoneWithAbort()) {
7130  DCHECK_NE(i, number_map_index_for_smi);
7131  // We know from known possible maps that this branch is not reachable,
7132  // so don't emit any code for it.
7133  if (continuation) {
7134  // Also don't generate any code for the continuation, just advance
7135  // through it.
7136  while (iterator_.current_offset() < continuation->last_continuation) {
7137  iterator_.Advance();
7139  }
7140  }
7141  continue;
7142  }
7143  if (i == number_map_index_for_smi) {
7144  DCHECK(is_number.has_value());
7145  sub_graph.Goto(&*is_number);
7146  sub_graph.Bind(&*is_number);
7147  }
7148 
7149  MaybeReduceResult result;
7150  if (is_any_store) {
7151  result = TryBuildPropertyStore(receiver, lookup_start_object,
7152  feedback.name(), access_info, access_mode);
7153  } else {
7154  result = TryBuildPropertyLoad(receiver, lookup_start_object,
7155  feedback.name(), access_info);
7156  }
7157 
7158  switch (result.kind()) {
7161  DCHECK_EQ(result.HasValue(), !is_any_store);
7162  if (!done.has_value()) {
7163  // We initialize the label {done} lazily on the first possible path.
7164  // If no possible path exists, it is guaranteed that BuildCheckMaps
7165  // emitted an unconditional deopt and we return DoneWithAbort at the
7166  // end. We need one extra predecessor to jump from the generic case.
7167  const int possible_predecessors = access_info_count - i + 1;
7168  if (is_any_store) {
7169  done.emplace(&sub_graph, possible_predecessors);
7170  } else {
7171  ret_val.emplace(0);
7172  done.emplace(
7173  &sub_graph, possible_predecessors,
7174  std::initializer_list<MaglevSubGraphBuilder::Variable*>{
7175  &*ret_val});
7176  }
7177  }
7178 
7179 #ifdef DEBUG
7180  int predecessor_count_for_offset_after_continuation =
7181  continuation ? predecessor_count_[continuation->after_continuation]
7182  : 0;
7183 #endif
7184  if (!is_any_store) {
7185  if (continuation) {
7186  // Generate code for further bytecodes inside the polymorphic
7187  // branch.
7188 
7189  // First save the result of the property load in the accumulator.
7190  SetAccumulator(result.value());
7191 
7193 
7194  // The continuation stored its value into the accumulator. Take that
7195  // value as our value.
7196  sub_graph.set(*ret_val,
7199  } else {
7200  sub_graph.set(*ret_val, result.value());
7201  }
7202  }
7203  if (current_block_) {
7204  sub_graph.Goto(&*done);
7205 #ifdef DEBUG
7206  if (continuation) {
7207  DCHECK_EQ(predecessor_count_[continuation->after_continuation],
7208  predecessor_count_for_offset_after_continuation);
7209  }
7210 #endif
7211  } else {
7212  // The continuation is dead. Marking it dead decreased the predecessor
7213  // count of continuation->after_continuation. Correct for that.
7214 #ifdef DEBUG
7215  DCHECK_EQ(predecessor_count_[continuation->after_continuation],
7216  predecessor_count_for_offset_after_continuation - 1);
7217 #endif
7218  ++predecessor_count_[continuation->after_continuation];
7219  }
7220  break;
7221  }
7223  break;
7225  if (!generic_access.has_value()) {
7226  // Conservatively assume that all remaining branches can go into the
7227  // generic path, as we have to initialize the predecessors upfront.
7228  // TODO(pthier): Find a better way to do that.
7229  generic_access.emplace(&sub_graph, access_info_count - i);
7230  }
7231  sub_graph.Goto(&*generic_access);
7232  break;
7233  default:
7234  UNREACHABLE();
7235  }
7236 
7237  if (check_next_map.has_value()) {
7238  sub_graph.Bind(&*check_next_map);
7239  }
7240  }
7241 
7242  if (generic_access.has_value() &&
7243  !sub_graph.TrimPredecessorsAndBind(&*generic_access).IsDoneWithAbort()) {
7244  MaybeReduceResult generic_result = build_generic_access();
7245  DCHECK(generic_result.IsDone());
7246  DCHECK_EQ(generic_result.IsDoneWithValue(), !is_any_store);
7247 
7248  if (continuation) {
7249  DCHECK(!is_any_store);
7250  SetAccumulator(generic_result.value());
7252  if (!done.has_value()) {
7255  }
7256  sub_graph.set(*ret_val,
7259  } else {
7260  if (!done.has_value()) {
7261  return is_any_store ? ReduceResult::Done() : generic_result.value();
7262  }
7263  if (!is_any_store) {
7264  sub_graph.set(*ret_val, generic_result.value());
7265  }
7266  }
7267  sub_graph.Goto(&*done);
7268  }
7269 
7270  if (done.has_value()) {
7271  if (continuation) {
7272  DCHECK_EQ(iterator_.current_offset(), continuation->last_continuation);
7273  }
7274  RETURN_IF_ABORT(sub_graph.TrimPredecessorsAndBind(&*done));
7275  return is_any_store ? ReduceResult::Done() : sub_graph.get(*ret_val);
7276  } else {
7277  iterator_.SetOffset(start_offset);
7279  start_source_position_iterator_state);
7280  return ReduceResult::DoneWithAbort();
7281  }
7282 }
void RestoreState(const IndexAndPositionState &saved_state)
MaybeReduceResult TryBuildPropertyStore(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info, compiler::AccessMode access_mode)
void BuildContinuationForPolymorphicPropertyLoad(const ContinuationOffsets &offsets)
std::optional< ContinuationOffsets > FindContinuationForPolymorphicPropertyLoad()
MaybeReduceResult TryBuildPropertyLoad(ValueNode *receiver, ValueNode *lookup_start_object, compiler::NameRef name, compiler::PropertyAccessInfo const &access_info)

References v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), broker(), v8::internal::DCHECK(), DCHECK_EQ, DCHECK_NE, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::get(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfTrue(), v8::internal::compiler::NamedAccessFeedback::has_deprecated_map_without_migration_target(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::IntersectType(), v8::internal::compiler::IsAnyStore(), v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::maglev::MaybeReduceResult::IsDoneWithAbort(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::compiler::PropertyAccessInfo::IsInvalid(), v8::internal::HeapObject::kMapOffset, v8::internal::compiler::kSmi, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::compiler::NamedAccessFeedback::name(), v8::base::internal::result, RETURN_IF_ABORT, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::set(), v8::internal::ZoneVector< T >::size(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::TrimPredecessorsAndBind(), v8::internal::UNREACHABLE(), v8::internal::maglev::MaybeReduceResult::value(), v8::base::VectorOf(), and v8::internal::interpreter::Register::virtual_accumulator().

Referenced by TryBuildNamedAccess().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyAccess()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyAccess ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NameRef  name,
compiler::PropertyAccessInfo const &  access_info,
compiler::AccessMode  access_mode 
)
private

Definition at line 5901 of file maglev-graph-builder.cc.

5904  {
5905  switch (access_mode) {
5907  return TryBuildPropertyLoad(receiver, lookup_start_object, name,
5908  access_info);
5912  DCHECK_EQ(receiver, lookup_start_object);
5913  return TryBuildPropertyStore(receiver, lookup_start_object, name,
5914  access_info, access_mode);
5916  // TODO(victorgomes): BuildPropertyTest.
5917  return {};
5918  }
5919 }

References DCHECK_EQ, v8::internal::compiler::kDefine, v8::internal::compiler::kHas, v8::internal::compiler::kLoad, v8::internal::compiler::kStore, v8::internal::compiler::kStoreInLiteral, v8::internal::name, TryBuildPropertyLoad(), and TryBuildPropertyStore().

Referenced by TryBuildNamedAccess().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyCellLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyCellLoad ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4218 of file maglev-graph-builder.cc.

4219  {
4220  // TODO(leszeks): A bunch of this is copied from
4221  // js-native-context-specialization.cc -- I wonder if we can unify it
4222  // somehow.
4223  DCHECK(global_access_feedback.IsPropertyCell());
4224 
4225  compiler::PropertyCellRef property_cell =
4226  global_access_feedback.property_cell();
4227  if (!property_cell.Cache(broker())) return {};
4228 
4229  compiler::ObjectRef property_cell_value = property_cell.value(broker());
4230  if (property_cell_value.IsPropertyCellHole()) {
4231  // The property cell is no longer valid.
4232  return EmitUnconditionalDeopt(
4233  DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
4234  }
4235 
4236  PropertyDetails property_details = property_cell.property_details();
4237  PropertyCellType property_cell_type = property_details.cell_type();
4238  DCHECK_EQ(PropertyKind::kData, property_details.kind());
4239 
4240  if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
4241  return GetConstant(property_cell_value);
4242  }
4243 
4244  // Record a code dependency on the cell if we can benefit from the
4245  // additional feedback, or the global property is configurable (i.e.
4246  // can be deleted or reconfigured to an accessor property).
4247  if (property_cell_type != PropertyCellType::kMutable ||
4248  property_details.IsConfigurable()) {
4249  broker()->dependencies()->DependOnGlobalProperty(property_cell);
4250  }
4251 
4252  // Load from constant/undefined global property can be constant-folded.
4253  if (property_cell_type == PropertyCellType::kConstant ||
4254  property_cell_type == PropertyCellType::kUndefined) {
4255  return GetConstant(property_cell_value);
4256  }
4257 
4258  ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4259  return BuildLoadTaggedField(property_cell_node, PropertyCell::kValueOffset);
4260 }

References broker(), BuildLoadTaggedField(), v8::internal::compiler::PropertyCellRef::Cache(), v8::internal::PropertyDetails::cell_type(), v8::internal::DCHECK(), DCHECK_EQ, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnGlobalProperty(), EmitUnconditionalDeopt(), GetConstant(), v8::internal::PropertyDetails::IsConfigurable(), v8::internal::compiler::GlobalAccessFeedback::IsPropertyCell(), v8::internal::PropertyDetails::IsReadOnly(), v8::internal::kConstant, v8::internal::kData, v8::internal::PropertyDetails::kind(), v8::internal::kMutable, v8::internal::kUndefined, v8::internal::compiler::GlobalAccessFeedback::property_cell(), v8::internal::compiler::PropertyCellRef::property_details(), and v8::internal::compiler::PropertyCellRef::value().

Referenced by TryBuildGlobalLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyCellStore()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyCellStore ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4116 of file maglev-graph-builder.cc.

4117  {
4118  DCHECK(global_access_feedback.IsPropertyCell());
4119 
4120  compiler::PropertyCellRef property_cell =
4121  global_access_feedback.property_cell();
4122  if (!property_cell.Cache(broker())) return {};
4123 
4124  compiler::ObjectRef property_cell_value = property_cell.value(broker());
4125  if (property_cell_value.IsPropertyCellHole()) {
4126  // The property cell is no longer valid.
4127  return EmitUnconditionalDeopt(
4128  DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
4129  }
4130 
4131  PropertyDetails property_details = property_cell.property_details();
4132  DCHECK_EQ(PropertyKind::kData, property_details.kind());
4133 
4134  if (property_details.IsReadOnly()) {
4135  // Don't even bother trying to lower stores to read-only data
4136  // properties.
4137  // TODO(neis): We could generate code that checks if the new value
4138  // equals the old one and then does nothing or deopts, respectively.
4139  return {};
4140  }
4141 
4142  switch (property_details.cell_type()) {
4144  return {};
4146  // Record a code dependency on the cell, and just deoptimize if the new
4147  // value doesn't match the previous value stored inside the cell.
4148  broker()->dependencies()->DependOnGlobalProperty(property_cell);
4149  ValueNode* value = GetAccumulator();
4151  value, property_cell_value, DeoptimizeReason::kStoreToConstant);
4152  }
4154  // We rely on stability further below.
4155  if (property_cell_value.IsHeapObject() &&
4156  !property_cell_value.AsHeapObject().map(broker()).is_stable()) {
4157  return {};
4158  }
4159  // Record a code dependency on the cell, and just deoptimize if the new
4160  // value's type doesn't match the type of the previous value in the cell.
4161  broker()->dependencies()->DependOnGlobalProperty(property_cell);
4162  ValueNode* value = GetAccumulator();
4163  if (property_cell_value.IsHeapObject()) {
4164  compiler::MapRef property_cell_value_map =
4165  property_cell_value.AsHeapObject().map(broker());
4166  broker()->dependencies()->DependOnStableMap(property_cell_value_map);
4169  BuildCheckMaps(value, base::VectorOf({property_cell_value_map})));
4170  } else {
4172  }
4173  ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4174  BuildStoreTaggedField(property_cell_node, value,
4175  PropertyCell::kValueOffset,
4177  break;
4178  }
4180  // Record a code dependency on the cell, and just deoptimize if the
4181  // property ever becomes read-only.
4182  broker()->dependencies()->DependOnGlobalProperty(property_cell);
4183  ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
4184  BuildStoreTaggedField(property_cell_node, GetAccumulator(),
4185  PropertyCell::kValueOffset,
4187  break;
4188  }
4190  UNREACHABLE();
4191  }
4192  return ReduceResult::Done();
4193 }
ReduceResult BuildCheckNumericalValueOrByReference(ValueNode *node, compiler::ObjectRef ref, DeoptimizeReason reason)

References broker(), BuildCheckHeapObject(), BuildCheckMaps(), BuildCheckNumericalValueOrByReference(), BuildStoreTaggedField(), v8::internal::compiler::PropertyCellRef::Cache(), v8::internal::PropertyDetails::cell_type(), v8::internal::DCHECK(), DCHECK_EQ, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnGlobalProperty(), v8::internal::compiler::CompilationDependencies::DependOnStableMap(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), GetAccumulator(), GetConstant(), GetSmiValue(), v8::internal::compiler::GlobalAccessFeedback::IsPropertyCell(), v8::internal::PropertyDetails::IsReadOnly(), v8::internal::kConstant, v8::internal::kConstantType, v8::internal::kData, v8::internal::maglev::kDefault, v8::internal::PropertyDetails::kind(), v8::internal::kInTransition, v8::internal::kMutable, v8::internal::kUndefined, v8::internal::compiler::HeapObjectRef::map(), v8::internal::compiler::GlobalAccessFeedback::property_cell(), v8::internal::compiler::PropertyCellRef::property_details(), RETURN_IF_ABORT, v8::internal::UNREACHABLE(), v8::internal::compiler::PropertyCellRef::value(), v8::internal::value, and v8::base::VectorOf().

Referenced by TryBuildGlobalStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyGetterCall()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyGetterCall ( compiler::PropertyAccessInfo const &  access_info,
ValueNode receiver,
ValueNode lookup_start_object 
)
private

Definition at line 5481 of file maglev-graph-builder.cc.

5483  {
5484  compiler::ObjectRef constant = access_info.constant().value();
5485 
5486  if (access_info.IsDictionaryProtoAccessorConstant()) {
5487  // For fast mode holders we recorded dependencies in BuildPropertyLoad.
5488  for (const compiler::MapRef map : access_info.lookup_start_object_maps()) {
5490  map, access_info.name(), constant, PropertyKind::kAccessor);
5491  }
5492  }
5493 
5494  // Introduce the call to the getter function.
5495  if (constant.IsJSFunction()) {
5496  ConvertReceiverMode receiver_mode =
5497  receiver == lookup_start_object
5500  CallArguments args(receiver_mode, {receiver});
5501  return TryReduceCallForConstant(constant.AsJSFunction(), args);
5502  } else {
5503  // Disable optimizations for super ICs using API getters, so that we get
5504  // the correct receiver checks.
5505  if (receiver != lookup_start_object) return {};
5506  compiler::FunctionTemplateInfoRef templ = constant.AsFunctionTemplateInfo();
5507  CallArguments args(ConvertReceiverMode::kNotNullOrUndefined, {receiver});
5508 
5509  return TryReduceCallForApiFunction(templ, {}, args);
5510  }
5511 }
void DependOnConstantInDictionaryPrototypeChain(MapRef receiver_map, NameRef property_name, ObjectRef constant, PropertyKind kind)

References v8::base::args, broker(), v8::internal::compiler::PropertyAccessInfo::constant(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnConstantInDictionaryPrototypeChain(), v8::internal::compiler::PropertyAccessInfo::IsDictionaryProtoAccessorConstant(), v8::internal::kAccessor, v8::internal::kAny, v8::internal::kNotNullOrUndefined, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::compiler::PropertyAccessInfo::name(), TryReduceCallForApiFunction(), and TryReduceCallForConstant().

Referenced by TryBuildPropertyLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyLoad ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NameRef  name,
compiler::PropertyAccessInfo const &  access_info 
)
private

Definition at line 5794 of file maglev-graph-builder.cc.

5796  {
5797  if (access_info.holder().has_value() && !access_info.HasDictionaryHolder()) {
5799  access_info.lookup_start_object_maps(), kStartAtPrototype,
5800  access_info.holder().value());
5801  }
5802 
5803  switch (access_info.kind()) {
5805  UNREACHABLE();
5807  return GetRootConstant(RootIndex::kUndefinedValue);
5810  ValueNode* result =
5811  BuildLoadField(access_info, lookup_start_object, name);
5812  RecordKnownProperty(lookup_start_object, name, result,
5813  AccessInfoGuaranteedConst(access_info),
5815  return result;
5816  }
5818  compiler::OptionalObjectRef constant =
5819  TryFoldLoadDictPrototypeConstant(access_info);
5820  if (!constant.has_value()) return {};
5821  return GetConstant(constant.value());
5822  }
5825  return TryBuildPropertyGetterCall(access_info, receiver,
5826  lookup_start_object);
5828  ValueNode* cell = GetConstant(access_info.constant().value().AsCell());
5829  return BuildLoadTaggedField<LoadTaggedFieldForProperty>(
5830  cell, Cell::kValueOffset, name);
5831  }
5833  DCHECK_EQ(receiver, lookup_start_object);
5834  ValueNode* result = BuildLoadStringLength(receiver);
5835  RecordKnownProperty(lookup_start_object, name, result,
5836  AccessInfoGuaranteedConst(access_info),
5838  return result;
5839  }
5841  // TODO(dmercadier): update KnownNodeInfo.
5842  ValueNode* string = BuildLoadTaggedField(
5843  lookup_start_object, JSPrimitiveWrapper::kValueOffset);
5844  return AddNewNode<StringLength>({string});
5845  }
5847  CHECK(!IsRabGsabTypedArrayElementsKind(access_info.elements_kind()));
5848  if (receiver != lookup_start_object) {
5849  // We're accessing the TypedArray length via a prototype (a TypedArray
5850  // object in the prototype chain, objects below it not having a "length"
5851  // property, reading via super.length). That will throw a TypeError.
5852  // This should never occur in any realistic code, so we can deopt here
5853  // instead of implementing special handling for it.
5854  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongMap);
5855  }
5856  return BuildLoadTypedArrayLength(lookup_start_object,
5857  access_info.elements_kind());
5858  }
5859  }
5860 }
compiler::OptionalObjectRef TryFoldLoadDictPrototypeConstant(compiler::PropertyAccessInfo const &access_info)
MaybeReduceResult TryBuildPropertyGetterCall(compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object)
ValueNode * BuildLoadField(compiler::PropertyAccessInfo const &access_info, ValueNode *lookup_start_object, compiler::NameRef name)
bool AccessInfoGuaranteedConst(compiler::PropertyAccessInfo const &access_info)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::AccessInfoGuaranteedConst(), broker(), BuildLoadField(), BuildLoadStringLength(), BuildLoadTaggedField(), BuildLoadTypedArrayLength(), CHECK, v8::internal::compiler::PropertyAccessInfo::constant(), DCHECK_EQ, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStablePrototypeChains(), v8::internal::compiler::PropertyAccessInfo::elements_kind(), EmitUnconditionalDeopt(), GetConstant(), GetRootConstant(), v8::internal::compiler::PropertyAccessInfo::HasDictionaryHolder(), v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::IsRabGsabTypedArrayElementsKind(), v8::internal::compiler::PropertyAccessInfo::kDataField, v8::internal::compiler::PropertyAccessInfo::kDictionaryProtoAccessorConstant, v8::internal::compiler::PropertyAccessInfo::kDictionaryProtoDataConstant, v8::internal::compiler::PropertyAccessInfo::kFastAccessorConstant, v8::internal::compiler::PropertyAccessInfo::kFastDataConstant, v8::internal::compiler::PropertyAccessInfo::kind(), v8::internal::compiler::PropertyAccessInfo::kInvalid, v8::internal::compiler::kLoad, v8::internal::compiler::PropertyAccessInfo::kModuleExport, v8::internal::compiler::PropertyAccessInfo::kNotFound, v8::internal::kStartAtPrototype, v8::internal::compiler::PropertyAccessInfo::kStringLength, v8::internal::compiler::PropertyAccessInfo::kStringWrapperLength, v8::internal::compiler::PropertyAccessInfo::kTypedArrayLength, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::name, RecordKnownProperty(), v8::base::internal::result, TryBuildPropertyGetterCall(), TryFoldLoadDictPrototypeConstant(), and v8::internal::UNREACHABLE().

Referenced by TryBuildPropertyAccess().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertySetterCall()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertySetterCall ( compiler::PropertyAccessInfo const &  access_info,
ValueNode receiver,
ValueNode lookup_start_object,
ValueNode value 
)
private

Definition at line 5513 of file maglev-graph-builder.cc.

5515  {
5516  // Setting super properties shouldn't end up here.
5517  DCHECK_EQ(receiver, lookup_start_object);
5518  compiler::ObjectRef constant = access_info.constant().value();
5519  if (constant.IsJSFunction()) {
5521  {receiver, value});
5522  RETURN_IF_ABORT(TryReduceCallForConstant(constant.AsJSFunction(), args));
5523  } else {
5524  compiler::FunctionTemplateInfoRef templ = constant.AsFunctionTemplateInfo();
5526  {receiver, value});
5528  }
5529  // Ignore the return value of the setter call.
5530  return ReduceResult::Done();
5531 }

References v8::base::args, v8::internal::compiler::PropertyAccessInfo::constant(), DCHECK_EQ, v8::internal::maglev::ReduceResult::Done(), v8::internal::kNotNullOrUndefined, RETURN_IF_ABORT, TryReduceCallForApiFunction(), TryReduceCallForConstant(), and v8::internal::value.

Referenced by TryBuildPropertyStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildPropertyStore()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildPropertyStore ( ValueNode receiver,
ValueNode lookup_start_object,
compiler::NameRef  name,
compiler::PropertyAccessInfo const &  access_info,
compiler::AccessMode  access_mode 
)
private

Definition at line 5862 of file maglev-graph-builder.cc.

5865  {
5866  if (access_info.holder().has_value()) {
5868  access_info.lookup_start_object_maps(), kStartAtPrototype,
5869  access_info.holder().value());
5870  }
5871 
5872  switch (access_info.kind()) {
5874  return TryBuildPropertySetterCall(access_info, receiver,
5875  lookup_start_object, GetAccumulator());
5876  }
5879  MaybeReduceResult res =
5880  TryBuildStoreField(access_info, receiver, access_mode);
5881  if (res.IsDone()) {
5884  AccessInfoGuaranteedConst(access_info), access_mode);
5885  return res;
5886  }
5887  return {};
5888  }
5897  UNREACHABLE();
5898  }
5899 }
MaybeReduceResult TryBuildStoreField(compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, compiler::AccessMode access_mode)
MaybeReduceResult TryBuildPropertySetterCall(compiler::PropertyAccessInfo const &access_info, ValueNode *receiver, ValueNode *lookup_start_object, ValueNode *value)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::AccessInfoGuaranteedConst(), v8::internal::maglev::InterpreterFrameState::accumulator(), broker(), current_interpreter_frame_, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnStablePrototypeChains(), GetAccumulator(), v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::maglev::MaybeReduceResult::IsDone(), v8::internal::compiler::PropertyAccessInfo::kDataField, v8::internal::compiler::PropertyAccessInfo::kDictionaryProtoAccessorConstant, v8::internal::compiler::PropertyAccessInfo::kDictionaryProtoDataConstant, v8::internal::compiler::PropertyAccessInfo::kFastAccessorConstant, v8::internal::compiler::PropertyAccessInfo::kFastDataConstant, v8::internal::compiler::PropertyAccessInfo::kind(), v8::internal::compiler::PropertyAccessInfo::kInvalid, v8::internal::compiler::PropertyAccessInfo::kModuleExport, v8::internal::compiler::PropertyAccessInfo::kNotFound, v8::internal::kStartAtPrototype, v8::internal::compiler::PropertyAccessInfo::kStringLength, v8::internal::compiler::PropertyAccessInfo::kStringWrapperLength, v8::internal::compiler::PropertyAccessInfo::kTypedArrayLength, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::name, RecordKnownProperty(), TryBuildPropertySetterCall(), TryBuildStoreField(), and v8::internal::UNREACHABLE().

Referenced by TryBuildPropertyAccess().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildScriptContextConstantLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildScriptContextConstantLoad ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4195 of file maglev-graph-builder.cc.

4196  {
4197  DCHECK(global_access_feedback.IsScriptContextSlot());
4198  if (!global_access_feedback.immutable()) return {};
4199  compiler::OptionalObjectRef maybe_slot_value =
4200  global_access_feedback.script_context().get(
4201  broker(), global_access_feedback.slot_index());
4202  if (!maybe_slot_value) return {};
4203  return GetConstant(maybe_slot_value.value());
4204 }

References broker(), v8::internal::DCHECK(), v8::internal::compiler::ContextRef::get(), GetConstant(), v8::internal::compiler::GlobalAccessFeedback::immutable(), v8::internal::compiler::GlobalAccessFeedback::IsScriptContextSlot(), v8::internal::compiler::GlobalAccessFeedback::script_context(), and v8::internal::compiler::GlobalAccessFeedback::slot_index().

Referenced by TryBuildScriptContextLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildScriptContextLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildScriptContextLoad ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4206 of file maglev-graph-builder.cc.

4207  {
4208  DCHECK(global_access_feedback.IsScriptContextSlot());
4209  RETURN_IF_DONE(TryBuildScriptContextConstantLoad(global_access_feedback));
4210  auto script_context = GetConstant(global_access_feedback.script_context());
4211  ContextSlotMutability mutability =
4212  global_access_feedback.immutable() ? kImmutable : kMutable;
4213  return LoadAndCacheContextSlot(script_context,
4214  global_access_feedback.slot_index(),
4215  mutability, ContextMode::kHasContextCells);
4216 }
MaybeReduceResult TryBuildScriptContextConstantLoad(const compiler::GlobalAccessFeedback &global_access_feedback)

References v8::internal::DCHECK(), GetConstant(), v8::internal::compiler::GlobalAccessFeedback::immutable(), v8::internal::compiler::GlobalAccessFeedback::IsScriptContextSlot(), v8::internal::kHasContextCells, kImmutable, kMutable, LoadAndCacheContextSlot(), RETURN_IF_DONE, v8::internal::compiler::GlobalAccessFeedback::script_context(), v8::internal::compiler::GlobalAccessFeedback::slot_index(), and TryBuildScriptContextConstantLoad().

Referenced by TryBuildGlobalLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildScriptContextStore()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildScriptContextStore ( const compiler::GlobalAccessFeedback global_access_feedback)
private

Definition at line 4106 of file maglev-graph-builder.cc.

4107  {
4108  DCHECK(global_access_feedback.IsScriptContextSlot());
4109  if (global_access_feedback.immutable()) return {};
4110  auto script_context = GetConstant(global_access_feedback.script_context());
4111  return StoreAndCacheContextSlot(
4112  script_context, global_access_feedback.slot_index(), GetAccumulator(),
4114 }

References v8::internal::DCHECK(), GetAccumulator(), GetConstant(), v8::internal::compiler::GlobalAccessFeedback::immutable(), v8::internal::compiler::GlobalAccessFeedback::IsScriptContextSlot(), v8::internal::kHasContextCells, v8::internal::compiler::GlobalAccessFeedback::script_context(), v8::internal::compiler::GlobalAccessFeedback::slot_index(), and StoreAndCacheContextSlot().

Referenced by TryBuildGlobalStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildStoreDataView()

template<typename StoreNode , typename Function >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildStoreDataView ( const CallArguments args,
ExternalArrayType  type,
Function &&  getValue 
)
private

Definition at line 9696 of file maglev-graph-builder.cc.

9697  {
9698  if (!CanSpeculateCall()) return {};
9699  if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) {
9700  // TODO(victorgomes): Add checks whether the array has been detached.
9701  return {};
9702  }
9703  // TODO(victorgomes): Add data view to known types.
9704  ValueNode* receiver = GetValueOrUndefined(args.receiver());
9705  AddNewNode<CheckInstanceType>({receiver}, CheckType::kCheckHeapObject,
9706  JS_DATA_VIEW_TYPE, JS_DATA_VIEW_TYPE);
9707  // TODO(v8:11111): Optimize for JS_RAB_GSAB_DATA_VIEW_TYPE too.
9708  ValueNode* offset =
9710  AddNewNode<CheckJSDataViewBounds>({receiver, offset},
9712  ValueNode* value = getValue(args[1]);
9713  ValueNode* is_little_endian = args[2] ? args[2] : GetBooleanConstant(false);
9714  AddNewNode<StoreNode>({receiver, offset, value, is_little_endian}, type);
9715  return GetRootConstant(RootIndex::kUndefinedValue);
9716 }
@ kExternalFloat64Array
Definition: globals.h:2473

References v8::base::args, broker(), v8::internal::kExternalFloat64Array, v8::internal::tracing::type, and v8::internal::value.

+ Here is the call graph for this function:

◆ TryBuildStoreField()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryBuildStoreField ( compiler::PropertyAccessInfo const &  access_info,
ValueNode receiver,
compiler::AccessMode  access_mode 
)
private

Definition at line 5674 of file maglev-graph-builder.cc.

5676  {
5677  FieldIndex field_index = access_info.field_index();
5678  Representation field_representation = access_info.field_representation();
5679 
5680  compiler::OptionalMapRef original_map;
5681  if (access_info.HasTransitionMap()) {
5682  compiler::MapRef transition = access_info.transition_map().value();
5683  original_map = transition.GetBackPointer(broker()).AsMap();
5684 
5685  if (original_map->UnusedPropertyFields() == 0) {
5686  DCHECK(!field_index.is_inobject());
5687  }
5688  if (!field_index.is_inobject()) {
5689  // If slack tracking ends after this compilation started but before it's
5690  // finished, then {original_map} could be out-of-sync with {transition}.
5691  // In particular, its UnusedPropertyFields could be non-zero, which would
5692  // lead us to not extend the property backing store, while the underlying
5693  // Map has actually zero UnusedPropertyFields. Thus, we install a
5694  // dependency on {original_map} now, so that if such a situation happens,
5695  // we'll throw away the code.
5696  broker()->dependencies()->DependOnNoSlackTrackingChange(*original_map);
5697  }
5698  } else if (access_info.IsFastDataConstant() &&
5699  access_mode == compiler::AccessMode::kStore) {
5700  return EmitUnconditionalDeopt(DeoptimizeReason::kStoreToConstant);
5701  }
5702 
5703  ValueNode* value = GetAccumulator();
5704  if (IsEmptyNodeType(GetType(value))) {
5705  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongValue);
5706  }
5707 
5708  if (field_representation.IsSmi()) {
5710  } else {
5711  if (field_representation.IsHeapObject()) {
5712  // Emit a map check for the field type, if needed, otherwise just a
5713  // HeapObject check.
5714  if (access_info.field_map().has_value()) {
5716  value, base::VectorOf({access_info.field_map().value()})));
5717  } else {
5719  }
5720  }
5721  }
5722 
5723  ValueNode* store_target;
5724  if (field_index.is_inobject()) {
5725  store_target = receiver;
5726  } else {
5727  // The field is in the property array, first load it from there.
5728  store_target =
5729  BuildLoadTaggedField(receiver, JSReceiver::kPropertiesOrHashOffset);
5730  if (original_map && original_map->UnusedPropertyFields() == 0) {
5731  store_target = BuildExtendPropertiesBackingStore(*original_map, receiver,
5732  store_target);
5733  }
5734  }
5735 
5736  if (field_representation.IsDouble()) {
5737  if (access_info.HasTransitionMap()) {
5738  // Allocate the mutable double box owned by the field.
5739  ValueNode* heapnumber_value =
5740  AddNewNode<Float64ToHeapNumberForField>({value});
5741  BuildStoreTaggedField(store_target, heapnumber_value,
5742  field_index.offset(),
5744  BuildStoreMap(receiver, access_info.transition_map().value(),
5746  } else {
5747  AddNewNode<StoreDoubleField>({store_target, value}, field_index.offset());
5748  }
5749  return ReduceResult::Done();
5750  }
5751 
5752 
5753  StoreTaggedMode store_mode = access_info.HasTransitionMap()
5756  if (field_representation.IsSmi()) {
5758  field_index.offset(), store_mode);
5759  } else {
5760  DCHECK(field_representation.IsHeapObject() ||
5761  field_representation.IsTagged());
5762  BuildStoreTaggedField(store_target, value, field_index.offset(),
5763  store_mode);
5764  }
5765  if (access_info.HasTransitionMap()) {
5766  BuildStoreMap(receiver, access_info.transition_map().value(),
5768  }
5769 
5770  return ReduceResult::Done();
5771 }
Node * BuildStoreTaggedFieldNoWriteBarrier(ValueNode *object, ValueNode *value, int offset, StoreTaggedMode store_mode)
MaybeReduceResult GetAccumulatorSmi(UseReprHintRecording record_use_repr_hint=UseReprHintRecording::kRecord)
ValueNode * BuildExtendPropertiesBackingStore(compiler::MapRef map, ValueNode *receiver, ValueNode *property_array)

References broker(), BuildCheckHeapObject(), BuildCheckMaps(), BuildExtendPropertiesBackingStore(), BuildLoadTaggedField(), BuildStoreMap(), BuildStoreTaggedField(), BuildStoreTaggedFieldNoWriteBarrier(), v8::internal::DCHECK(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnNoSlackTrackingChange(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), v8::internal::compiler::PropertyAccessInfo::field_index(), v8::internal::compiler::PropertyAccessInfo::field_map(), v8::internal::compiler::PropertyAccessInfo::field_representation(), GetAccumulator(), GetAccumulatorSmi(), v8::internal::compiler::MapRef::GetBackPointer(), GetType(), v8::internal::compiler::PropertyAccessInfo::HasTransitionMap(), v8::internal::FieldIndex::is_inobject(), v8::internal::Representation::IsDouble(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::compiler::PropertyAccessInfo::IsFastDataConstant(), v8::internal::Representation::IsHeapObject(), v8::internal::Representation::IsSmi(), v8::internal::Representation::IsTagged(), v8::internal::maglev::kDefault, v8::internal::compiler::kStore, v8::internal::maglev::kTransitioning, v8::internal::maglev::StoreMap::kTransitioning, v8::internal::FieldIndex::offset(), RETURN_IF_ABORT, v8::internal::compiler::PropertyAccessInfo::transition_map(), v8::internal::value, and v8::base::VectorOf().

Referenced by TryBuildPropertyStore().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryBuildStoreTaggedFieldToAllocation()

void v8::internal::maglev::MaglevGraphBuilder::TryBuildStoreTaggedFieldToAllocation ( ValueNode object,
ValueNode value,
int  offset 
)
private

Definition at line 5213 of file maglev-graph-builder.cc.

5215  {
5216  if (offset == HeapObject::kMapOffset) return;
5217  if (!CanTrackObjectChanges(object, TrackObjectMode::kStore)) return;
5218  // This avoids loops in the object graph.
5219  if (value->Is<InlinedAllocation>()) return;
5220  InlinedAllocation* allocation = object->Cast<InlinedAllocation>();
5221  VirtualObject* vobject = GetModifiableObjectFromAllocation(allocation);
5222  CHECK_EQ(vobject->type(), VirtualObject::kDefault);
5223  CHECK_NOT_NULL(vobject);
5224  vobject->set(offset, value);
5225  AddNonEscapingUses(allocation, 1);
5226  if (v8_flags.trace_maglev_object_tracking) {
5227  std::cout << " * Setting value in virtual object "
5228  << PrintNodeLabel(graph_labeller(), vobject) << "[" << offset
5229  << "]: " << PrintNode(graph_labeller(), value) << std::endl;
5230  }
5231 }
VirtualObject * GetModifiableObjectFromAllocation(InlinedAllocation *allocation)

References AddNonEscapingUses(), CanTrackObjectChanges(), CHECK_EQ, CHECK_NOT_NULL, GetModifiableObjectFromAllocation(), graph_labeller(), v8::internal::maglev::VirtualObject::kDefault, v8::internal::HeapObject::kMapOffset, kStore, v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::internal::v8_flags, and v8::internal::value.

Referenced by BuildStoreTaggedField(), and BuildStoreTaggedFieldNoWriteBarrier().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryFoldFloat64BinaryOperationForToNumber() [1/2]

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldFloat64BinaryOperationForToNumber ( TaggedToFloat64ConversionType  conversion_type,
ValueNode left,
double  cst_right 
)
private

Definition at line 2546 of file maglev-graph-builder.cc.

2548  {
2549  auto cst_left = TryGetFloat64Constant(left, conversion_type);
2550  if (!cst_left.has_value()) return {};
2551  switch (kOperation) {
2552  case Operation::kAdd:
2553  return GetNumberConstant(cst_left.value() + cst_right);
2554  case Operation::kSubtract:
2555  return GetNumberConstant(cst_left.value() - cst_right);
2556  case Operation::kMultiply:
2557  return GetNumberConstant(cst_left.value() * cst_right);
2558  case Operation::kDivide:
2559  return GetNumberConstant(cst_left.value() / cst_right);
2560  case Operation::kModulus:
2561  // TODO(v8:7700): Constant fold mod.
2562  return {};
2563  case Operation::kExponentiate:
2564  return GetNumberConstant(math::pow(cst_left.value(), cst_right));
2565  default:
2566  UNREACHABLE();
2567  }
2568 }
ValueNode * GetNumberConstant(double constant)
double pow(double x, double y)
Definition: ieee754.cc:14

References GetNumberConstant(), v8::kOperation, v8::internal::math::pow(), TryGetFloat64Constant(), and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ TryFoldFloat64BinaryOperationForToNumber() [2/2]

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldFloat64BinaryOperationForToNumber ( TaggedToFloat64ConversionType  conversion_type,
ValueNode left,
ValueNode right 
)
private

Definition at line 2536 of file maglev-graph-builder.cc.

2538  {
2539  auto cst_right = TryGetFloat64Constant(right, conversion_type);
2540  if (!cst_right.has_value()) return {};
2541  return TryFoldFloat64BinaryOperationForToNumber<kOperation>(
2542  conversion_type, left, cst_right.value());
2543 }

References TryGetFloat64Constant().

+ Here is the call graph for this function:

◆ TryFoldFloat64UnaryOperationForToNumber()

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldFloat64UnaryOperationForToNumber ( TaggedToFloat64ConversionType  conversion_type,
ValueNode value 
)
private

Definition at line 2519 of file maglev-graph-builder.cc.

2520  {
2521  auto cst = TryGetFloat64Constant(value, conversion_type);
2522  if (!cst.has_value()) return {};
2523  switch (kOperation) {
2524  case Operation::kNegate:
2525  return GetNumberConstant(-cst.value());
2526  case Operation::kIncrement:
2527  return GetNumberConstant(cst.value() + 1);
2528  case Operation::kDecrement:
2529  return GetNumberConstant(cst.value() - 1);
2530  default:
2531  UNREACHABLE();
2532  }
2533 }

References GetNumberConstant(), v8::kOperation, TryGetFloat64Constant(), v8::internal::UNREACHABLE(), and v8::internal::value.

+ Here is the call graph for this function:

◆ TryFoldInt32BinaryOperation() [1/2]

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldInt32BinaryOperation ( ValueNode left,
int32_t  cst_right 
)
private

Definition at line 2368 of file maglev-graph-builder.cc.

2369  {
2370  auto cst_left = TryGetInt32Constant(left);
2371  if (!cst_left.has_value()) return {};
2372  switch (kOperation) {
2373  case Operation::kAdd: {
2374  int64_t result = static_cast<int64_t>(cst_left.value()) +
2375  static_cast<int64_t>(cst_right);
2376  if (result >= INT32_MIN && result <= INT32_MAX) {
2377  return GetInt32Constant(static_cast<int32_t>(result));
2378  }
2379  return {};
2380  }
2381  case Operation::kSubtract: {
2382  int64_t result = static_cast<int64_t>(cst_left.value()) -
2383  static_cast<int64_t>(cst_right);
2384  if (result >= INT32_MIN && result <= INT32_MAX) {
2385  return GetInt32Constant(static_cast<int32_t>(result));
2386  }
2387  return {};
2388  }
2389  case Operation::kMultiply: {
2390  int64_t result = static_cast<int64_t>(cst_left.value()) *
2391  static_cast<int64_t>(cst_right);
2392  if (result >= INT32_MIN && result <= INT32_MAX) {
2393  return GetInt32Constant(static_cast<int32_t>(result));
2394  }
2395  return {};
2396  }
2397  case Operation::kModulus:
2398  // TODO(v8:7700): Constant fold mod.
2399  return {};
2400  case Operation::kDivide:
2401  // TODO(v8:7700): Constant fold division.
2402  return {};
2404  return GetInt32Constant(cst_left.value() & cst_right);
2405  case Operation::kBitwiseOr:
2406  return GetInt32Constant(cst_left.value() | cst_right);
2408  return GetInt32Constant(cst_left.value() ^ cst_right);
2409  case Operation::kShiftLeft:
2410  return GetInt32Constant(cst_left.value()
2411  << (static_cast<uint32_t>(cst_right) % 32));
2412  case Operation::kShiftRight:
2413  return GetInt32Constant(cst_left.value() >>
2414  (static_cast<uint32_t>(cst_right) % 32));
2415  case Operation::kShiftRightLogical:
2416  return GetUint32Constant(static_cast<uint32_t>(cst_left.value()) >>
2417  (static_cast<uint32_t>(cst_right) % 32));
2418  default:
2419  UNREACHABLE();
2420  }
2421 }
WordBinopKindMask::For< WordBinopOp::Kind::kBitwiseXor > kBitwiseXor
Definition: opmasks.h:179
ShiftKindMask::For< ShiftOp::Kind::kShiftLeft > kShiftLeft
Definition: opmasks.h:235
WordBinopKindMask::For< WordBinopOp::Kind::kBitwiseAnd > kBitwiseAnd
Definition: opmasks.h:178

References GetInt32Constant(), GetUint32Constant(), v8::kOperation, v8::base::internal::result, TryGetInt32Constant(), and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ TryFoldInt32BinaryOperation() [2/2]

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldInt32BinaryOperation ( ValueNode left,
ValueNode right 
)
private

Definition at line 2360 of file maglev-graph-builder.cc.

2361  {
2362  auto cst_right = TryGetInt32Constant(right);
2363  if (!cst_right.has_value()) return {};
2364  return TryFoldInt32BinaryOperation<kOperation>(left, cst_right.value());
2365 }

References TryGetInt32Constant().

+ Here is the call graph for this function:

◆ TryFoldInt32UnaryOperation()

template<Operation kOperation>
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryFoldInt32UnaryOperation ( ValueNode value)
private

Definition at line 2303 of file maglev-graph-builder.cc.

2304  {
2305  auto cst = TryGetInt32Constant(node);
2306  if (!cst.has_value()) return {};
2307  switch (kOperation) {
2308  case Operation::kBitwiseNot:
2309  return GetInt32Constant(~cst.value());
2310  case Operation::kIncrement:
2311  if (cst.value() < INT32_MAX) {
2312  return GetInt32Constant(cst.value() + 1);
2313  }
2314  return {};
2315  case Operation::kDecrement:
2316  if (cst.value() > INT32_MIN) {
2317  return GetInt32Constant(cst.value() - 1);
2318  }
2319  return {};
2320  case Operation::kNegate:
2321  if (cst.value() == 0) {
2322  return {};
2323  }
2324  if (cst.value() != INT32_MIN) {
2325  return GetInt32Constant(-cst.value());
2326  }
2327  return {};
2328  default:
2329  UNREACHABLE();
2330  }
2331 }

References GetInt32Constant(), v8::kOperation, TryGetInt32Constant(), and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ TryFoldLoadConstantDataField()

compiler::OptionalObjectRef v8::internal::maglev::MaglevGraphBuilder::TryFoldLoadConstantDataField ( compiler::JSObjectRef  holder,
compiler::PropertyAccessInfo const &  access_info 
)
private

Definition at line 5464 of file maglev-graph-builder.cc.

5466  {
5467  DCHECK(!access_info.field_representation().IsDouble());
5468  return holder.GetOwnFastConstantDataProperty(
5469  broker(), access_info.field_representation(), access_info.field_index(),
5470  broker()->dependencies());
5471 }

References broker(), v8::internal::DCHECK(), v8::internal::compiler::PropertyAccessInfo::field_index(), v8::internal::compiler::PropertyAccessInfo::field_representation(), v8::internal::compiler::JSObjectRef::GetOwnFastConstantDataProperty(), and v8::internal::Representation::IsDouble().

Referenced by BuildLoadField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryFoldLoadConstantDoubleField()

std::optional< Float64 > v8::internal::maglev::MaglevGraphBuilder::TryFoldLoadConstantDoubleField ( compiler::JSObjectRef  holder,
compiler::PropertyAccessInfo const &  access_info 
)
private

Definition at line 5473 of file maglev-graph-builder.cc.

5475  {
5476  DCHECK(access_info.field_representation().IsDouble());
5477  return holder.GetOwnFastConstantDoubleProperty(
5478  broker(), access_info.field_index(), broker()->dependencies());
5479 }

References broker(), v8::internal::DCHECK(), v8::internal::compiler::PropertyAccessInfo::field_index(), v8::internal::compiler::PropertyAccessInfo::field_representation(), v8::internal::compiler::JSObjectRef::GetOwnFastConstantDoubleProperty(), and v8::internal::Representation::IsDouble().

Referenced by BuildLoadField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryFoldLoadDictPrototypeConstant()

compiler::OptionalObjectRef v8::internal::maglev::MaglevGraphBuilder::TryFoldLoadDictPrototypeConstant ( compiler::PropertyAccessInfo const &  access_info)
private

Definition at line 5415 of file maglev-graph-builder.cc.

5416  {
5418  DCHECK(access_info.IsDictionaryProtoDataConstant());
5419  DCHECK(access_info.holder().has_value());
5420 
5421  compiler::OptionalObjectRef constant =
5422  access_info.holder()->GetOwnDictionaryProperty(
5423  broker(), access_info.dictionary_index(), broker()->dependencies());
5424  if (!constant.has_value()) return {};
5425 
5426  for (compiler::MapRef map : access_info.lookup_start_object_maps()) {
5427  DirectHandle<Map> map_handle = map.object();
5428  // Non-JSReceivers that passed AccessInfoFactory::ComputePropertyAccessInfo
5429  // must have different lookup start map.
5430  if (!IsJSReceiverMap(*map_handle)) {
5431  // Perform the implicit ToObject for primitives here.
5432  // Implemented according to ES6 section 7.3.2 GetV (V, P).
5433  Tagged<JSFunction> constructor =
5435  *map_handle, *broker()->target_native_context().object())
5436  .value();
5437  // {constructor.initial_map()} is loaded/stored with acquire-release
5438  // semantics for constructors.
5439  map = MakeRefAssumeMemoryFence(broker(), constructor->initial_map());
5440  DCHECK(IsJSObjectMap(*map.object()));
5441  }
5443  map, access_info.name(), constant.value(), PropertyKind::kData);
5444  }
5445 
5446  return constant;
5447 }
static std::optional< Tagged< JSFunction > > GetConstructorFunction(Tagged< Map > map, Tagged< Context > native_context)
Definition: map.cc:53
#define V8_DICT_PROPERTY_CONST_TRACKING_BOOL
Definition: globals.h:257

References broker(), v8::internal::DCHECK(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnConstantInDictionaryPrototypeChain(), v8::internal::compiler::PropertyAccessInfo::dictionary_index(), v8::internal::Map::GetConstructorFunction(), v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::compiler::PropertyAccessInfo::IsDictionaryProtoDataConstant(), v8::internal::kData, v8::internal::compiler::PropertyAccessInfo::lookup_start_object_maps(), v8::internal::compiler::MakeRefAssumeMemoryFence(), v8::internal::compiler::PropertyAccessInfo::name(), and V8_DICT_PROPERTY_CONST_TRACKING_BOOL.

Referenced by TryBuildPropertyLoad().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetConstant()

compiler::OptionalHeapObjectRef v8::internal::maglev::MaglevGraphBuilder::TryGetConstant ( ValueNode node,
ValueNode **  constant_node = nullptr 
)
private

Definition at line 3060 of file maglev-graph-builder.cc.

3061  {
3062  if (auto result = node->TryGetConstant(broker())) {
3063  if (constant_node) *constant_node = node;
3064  return result;
3065  }
3066  if (auto c = TryGetConstantAlternative(node)) {
3067  return TryGetConstant(*c, constant_node);
3068  }
3069  return {};
3070 }
std::optional< ValueNode * > TryGetConstantAlternative(ValueNode *node)

References broker(), v8::base::internal::result, v8::internal::maglev::ValueNode::TryGetConstant(), and TryGetConstantAlternative().

Referenced by BuildCheckMaps(), BuildLoadFixedArrayElement(), BuildTestUndetectable(), CheckContextExtensions(), TryBuildNamedAccess(), TryGetConstantDataFieldHolder(), TryReduceCompareEqualAgainstConstant(), and TrySpecializeLoadContextSlotToFunctionContext().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetConstantAlternative()

std::optional< ValueNode * > v8::internal::maglev::MaglevGraphBuilder::TryGetConstantAlternative ( ValueNode node)
private

Definition at line 3072 of file maglev-graph-builder.cc.

3073  {
3074  const NodeInfo* info = known_node_aspects().TryGetInfoFor(node);
3075  if (info) {
3076  if (auto c = info->alternative().checked_value()) {
3077  if (IsConstantNode(c->opcode())) {
3078  return c;
3079  }
3080  }
3081  }
3082  return {};
3083 }

References v8::internal::maglev::NodeInfo::alternative(), v8::internal::maglev::IsConstantNode(), known_node_aspects(), and v8::internal::maglev::KnownNodeAspects::TryGetInfoFor().

Referenced by TryGetConstant(), TryGetFloat64Constant(), TryGetInt32Constant(), and TryGetUint32Constant().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetConstantDataFieldHolder()

compiler::OptionalJSObjectRef v8::internal::maglev::MaglevGraphBuilder::TryGetConstantDataFieldHolder ( compiler::PropertyAccessInfo const &  access_info,
ValueNode lookup_start_object 
)
private

Definition at line 5449 of file maglev-graph-builder.cc.

5451  {
5452  if (!access_info.IsFastDataConstant()) return {};
5453  if (access_info.holder().has_value()) {
5454  return access_info.holder();
5455  }
5456  if (compiler::OptionalHeapObjectRef c = TryGetConstant(lookup_start_object)) {
5457  if (c.value().IsJSObject()) {
5458  return c.value().AsJSObject();
5459  }
5460  }
5461  return {};
5462 }

References v8::internal::compiler::PropertyAccessInfo::holder(), v8::internal::compiler::PropertyAccessInfo::IsFastDataConstant(), and TryGetConstant().

Referenced by BuildLoadField().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetFloat64Constant()

std::optional< double > v8::internal::maglev::MaglevGraphBuilder::TryGetFloat64Constant ( ValueNode value,
TaggedToFloat64ConversionType  conversion_type 
)
private

Definition at line 1919 of file maglev-graph-builder.cc.

1920  {
1921  switch (value->opcode()) {
1922  case Opcode::kConstant: {
1923  compiler::ObjectRef object = value->Cast<Constant>()->object();
1924  if (object.IsHeapNumber()) {
1925  return object.AsHeapNumber().value();
1926  }
1927  // Oddballs should be RootConstants.
1928  DCHECK(!IsOddball(*object.object()));
1929  return {};
1930  }
1931  case Opcode::kInt32Constant:
1932  return value->Cast<Int32Constant>()->value();
1933  case Opcode::kSmiConstant:
 1934  return value->Cast<SmiConstant>()->value().value();
 1935  case Opcode::kFloat64Constant:
 1936  return value->Cast<Float64Constant>()->value().get_scalar();
1937  case Opcode::kRootConstant: {
1938  Tagged<Object> root_object =
1939  local_isolate_->root(value->Cast<RootConstant>()->index());
1940  if (conversion_type == TaggedToFloat64ConversionType::kNumberOrBoolean &&
1941  IsBoolean(root_object)) {
1942  return Cast<Oddball>(root_object)->to_number_raw();
1943  }
1944  if (conversion_type == TaggedToFloat64ConversionType::kNumberOrOddball &&
1945  IsOddball(root_object)) {
1946 #ifdef V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
1947  if (IsUndefined(root_object)) {
1948  // We use the undefined nan and silence it to produce the same result
1949  // as a computation from non-constants would.
1950  auto ud = Float64::FromBits(kUndefinedNanInt64);
1951  return ud.to_quiet_nan().get_scalar();
1952  }
1953 #endif // V8_ENABLE_EXPERIMENTAL_UNDEFINED_DOUBLE
1954  return Cast<Oddball>(root_object)->to_number_raw();
1955  }
1956  if (IsHeapNumber(root_object)) {
1957  return Cast<HeapNumber>(root_object)->value();
1958  }
1959  return {};
1960  }
1961  default:
1962  break;
1963  }
1964  if (auto c = TryGetConstantAlternative(value)) {
1965  return TryGetFloat64Constant(*c, conversion_type);
1966  }
1967  return {};
1968 }

References v8::internal::DCHECK(), v8::internal::Float64::FromBits(), v8::internal::maglev::RootConstant::index(), v8::internal::compiler::anonymous_namespace{gap-resolver.cc}::kConstant, v8::internal::maglev::kNumberOrBoolean, v8::internal::maglev::kNumberOrOddball, local_isolate_, v8::internal::LocalIsolate::root(), TryGetConstantAlternative(), and v8::internal::value.

Referenced by GetFloat64ForToNumber(), TryFoldFloat64BinaryOperationForToNumber(), and TryFoldFloat64UnaryOperationForToNumber().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetInt32Constant()

std::optional< int32_t > v8::internal::maglev::MaglevGraphBuilder::TryGetInt32Constant ( ValueNode value)
private

Definition at line 1788 of file maglev-graph-builder.cc.

1789  {
1790  switch (value->opcode()) {
1791  case Opcode::kInt32Constant:
1792  return value->Cast<Int32Constant>()->value();
1793  case Opcode::kUint32Constant: {
1794  uint32_t uint32_value = value->Cast<Uint32Constant>()->value();
1795  if (uint32_value <= INT32_MAX) {
1796  return static_cast<int32_t>(uint32_value);
1797  }
1798  return {};
1799  }
1800  case Opcode::kSmiConstant:
1801  return value->Cast<SmiConstant>()->value().value();
1802  case Opcode::kFloat64Constant: {
1803  double double_value =
1804  value->Cast<Float64Constant>()->value().get_scalar();
1805  if (!IsInt32Double(double_value)) return {};
1806  return FastD2I(value->Cast<Float64Constant>()->value().get_scalar());
1807  }
1808  default:
1809  break;
1810  }
1811  if (auto c = TryGetConstantAlternative(value)) {
1812  return TryGetInt32Constant(*c);
1813  }
1814  return {};
1815 }
bool IsInt32Double(double value)

References v8::internal::FastD2I(), v8::internal::Float64::get_scalar(), v8::internal::IsInt32Double(), TryGetConstantAlternative(), v8::internal::maglev::Float64Constant::value(), and v8::internal::value.

Referenced by BuildLoadFixedArrayElement(), BuildLoadFixedDoubleArrayElement(), GetInt32(), TryFoldInt32BinaryOperation(), and TryFoldInt32UnaryOperation().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetNonEscapingArgumentsObject()

std::optional< VirtualObject * > v8::internal::maglev::MaglevGraphBuilder::TryGetNonEscapingArgumentsObject ( ValueNode value)
private

Definition at line 11669 of file maglev-graph-builder.cc.

11669  {
11670  if (!value->Is<InlinedAllocation>()) return {};
11671  InlinedAllocation* alloc = value->Cast<InlinedAllocation>();
11672  // Although the arguments object has not been changed so far, since it is not
11673  // escaping, it could be modified after this bytecode if it is inside a loop.
11674  if (IsInsideLoop()) {
11675  if (!is_loop_effect_tracking() ||
11676  !loop_effects_->allocations.contains(alloc)) {
11677  return {};
11678  }
11679  }
11680  // TODO(victorgomes): We can probably loosen the IsNotEscaping requirement if
11681  // we keep track of the arguments object changes so far.
11682  if (alloc->IsEscaping()) return {};
11683  VirtualObject* object = alloc->object();
11684  if (!object->has_static_map()) return {};
11685  // TODO(victorgomes): Support simple JSArray forwarding.
11686  compiler::MapRef map = object->map();
11687  // It is a rest parameter, if it is an array with ArgumentsElements node as
11688  // the elements array.
11689  if (map.IsJSArrayMap() && object->get(JSArgumentsObject::kElementsOffset)
11690  ->Is<ArgumentsElements>()) {
11691  return object;
11692  }
11693  // TODO(victorgomes): We can loosen the IsSloppyMappedArgumentsObject
11694  // requirement if there is no stores to the mapped arguments.
 11695  if (map.IsJSArgumentsObjectMap() &&
 11696  IsSloppyMappedArgumentsObject(broker(), map)) {
 11697  return object;
11698  }
11699  return {};
11700 }
bool IsSloppyMappedArgumentsObject(compiler::JSHeapBroker *broker, compiler::MapRef map)

References broker(), v8::internal::maglev::VirtualObject::has_static_map(), v8::internal::maglev::InlinedAllocation::IsEscaping(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::IsSloppyMappedArgumentsObject(), v8::internal::compiler::HeapObjectRef::map(), v8::internal::maglev::InlinedAllocation::object(), and v8::internal::value.

+ Here is the call graph for this function:

◆ TryGetParentContext()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::TryGetParentContext ( ValueNode node)
private

Definition at line 144 of file maglev-graph-builder.cc.

144  {
145  if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) {
146  return n->context().node();
147  }
148 
149  if (InlinedAllocation* alloc = node->TryCast<InlinedAllocation>()) {
 150  return alloc->object()->get(
 151  Context::OffsetOfElementAt(Context::PREVIOUS_INDEX));
 152  }
153 
154  if (CallRuntime* n = node->TryCast<CallRuntime>()) {
155  switch (n->function_id()) {
156  case Runtime::kPushBlockContext:
157  case Runtime::kPushCatchContext:
158  case Runtime::kNewFunctionContext:
159  return n->context().node();
160  default:
161  break;
162  }
163  }
164 
165  return nullptr;
166 }

References v8::internal::Context::OffsetOfElementAt(), v8::internal::Context::PREVIOUS_INDEX, and v8::internal::maglev::NodeBase::TryCast().

Referenced by MinimizeContextChainDepth().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryGetUint32Constant()

std::optional< uint32_t > v8::internal::maglev::MaglevGraphBuilder::TryGetUint32Constant ( ValueNode value)
private

Definition at line 1817 of file maglev-graph-builder.cc.

1818  {
1819  switch (value->opcode()) {
1820  case Opcode::kInt32Constant: {
1821  int32_t int32_value = value->Cast<Int32Constant>()->value();
1822  if (int32_value >= 0) {
1823  return static_cast<uint32_t>(int32_value);
1824  }
1825  return {};
1826  }
1827  case Opcode::kUint32Constant:
1828  return value->Cast<Uint32Constant>()->value();
1829  case Opcode::kSmiConstant: {
1830  int32_t smi_value = value->Cast<SmiConstant>()->value().value();
1831  if (smi_value >= 0) {
1832  return static_cast<uint32_t>(smi_value);
1833  }
1834  return {};
1835  }
1836  case Opcode::kFloat64Constant: {
1837  double double_value =
1838  value->Cast<Float64Constant>()->value().get_scalar();
1839  if (!IsUint32Double(double_value)) return {};
1840  return FastD2UI(value->Cast<Float64Constant>()->value().get_scalar());
1841  }
1842  default:
1843  break;
1844  }
1845  if (auto c = TryGetConstantAlternative(value)) {
1846  return TryGetUint32Constant(*c);
1847  }
1848  return {};
1849 }
bool IsUint32Double(double value)
unsigned int FastD2UI(double x)

References v8::internal::FastD2UI(), v8::internal::Float64::get_scalar(), v8::internal::IsUint32Double(), TryGetConstantAlternative(), v8::internal::maglev::Float64Constant::value(), and v8::internal::value.

+ Here is the call graph for this function:

◆ TryInferApiHolderValue()

compiler::HolderLookupResult v8::internal::maglev::MaglevGraphBuilder::TryInferApiHolderValue ( compiler::FunctionTemplateInfoRef  function_template_info,
ValueNode receiver 
)
private

Definition at line 11364 of file maglev-graph-builder.cc.

11366  {
11367  const compiler::HolderLookupResult not_found;
11368 
11369  auto receiver_info = known_node_aspects().TryGetInfoFor(receiver);
11370  if (!receiver_info || !receiver_info->possible_maps_are_known()) {
11371  // No info about receiver, can't infer API holder.
11372  return not_found;
11373  }
11374  DCHECK(!receiver_info->possible_maps().is_empty());
11375  compiler::MapRef first_receiver_map = receiver_info->possible_maps()[0];
11376 
11377  // See if we can constant-fold the compatible receiver checks.
11378  compiler::HolderLookupResult api_holder =
11379  function_template_info.LookupHolderOfExpectedType(broker(),
11380  first_receiver_map);
11381  if (api_holder.lookup == CallOptimization::kHolderNotFound) {
11382  // Can't infer API holder.
11383  return not_found;
11384  }
11385 
11386  // Check that all {receiver_maps} are actually JSReceiver maps and
11387  // that the {function_template_info} accepts them without access
11388  // checks (even if "access check needed" is set for {receiver}).
11389  //
11390  // API holder might be a receivers's hidden prototype (i.e. the receiver is
11391  // a global proxy), so in this case the map check or stability dependency on
11392  // the receiver guard us from detaching a global object from global proxy.
11393  CHECK(first_receiver_map.IsJSReceiverMap());
11394  CHECK(!first_receiver_map.is_access_check_needed() ||
11395  function_template_info.accept_any_receiver());
11396 
11397  for (compiler::MapRef receiver_map : receiver_info->possible_maps()) {
11398  compiler::HolderLookupResult holder_i =
11399  function_template_info.LookupHolderOfExpectedType(broker(),
11400  receiver_map);
11401 
11402  if (api_holder.lookup != holder_i.lookup) {
11403  // Different API holders, dynamic lookup is required.
11404  return not_found;
11405  }
11406  DCHECK(holder_i.lookup == CallOptimization::kHolderFound ||
11407  holder_i.lookup == CallOptimization::kHolderIsReceiver);
11408  if (holder_i.lookup == CallOptimization::kHolderFound) {
11409  DCHECK(api_holder.holder.has_value() && holder_i.holder.has_value());
11410  if (!api_holder.holder->equals(*holder_i.holder)) {
11411  // Different API holders, dynamic lookup is required.
11412  return not_found;
11413  }
11414  }
11415 
11416  CHECK(receiver_map.IsJSReceiverMap());
11417  CHECK(!receiver_map.is_access_check_needed() ||
11418  function_template_info.accept_any_receiver());
11419  }
11420  return api_holder;
11421 }

References v8::internal::compiler::FunctionTemplateInfoRef::accept_any_receiver(), broker(), CHECK, v8::internal::DCHECK(), v8::internal::compiler::HolderLookupResult::holder, v8::internal::compiler::MapRef::is_access_check_needed(), v8::internal::CallOptimization::kHolderFound, v8::internal::CallOptimization::kHolderIsReceiver, v8::internal::CallOptimization::kHolderNotFound, v8::internal::compiler::HolderLookupResult::lookup, and v8::internal::compiler::FunctionTemplateInfoRef::LookupHolderOfExpectedType().

+ Here is the call graph for this function:

◆ TryReadBoilerplateForFastLiteral()

std::optional< VirtualObject * > v8::internal::maglev::MaglevGraphBuilder::TryReadBoilerplateForFastLiteral ( compiler::JSObjectRef  boilerplate,
AllocationType  allocation,
int  max_depth,
int max_properties 
)
private

Definition at line 13171 of file maglev-graph-builder.cc.

13173  {
13174  DCHECK_GE(max_depth, 0);
13175  DCHECK_GE(*max_properties, 0);
13176 
13177  if (max_depth == 0) return {};
13178 
13179  // Prevent concurrent migrations of boilerplate objects.
13180  compiler::JSHeapBroker::BoilerplateMigrationGuardIfNeeded
13181  boilerplate_access_guard(broker());
13182 
13183  // Now that we hold the migration lock, get the current map.
13184  compiler::MapRef boilerplate_map = boilerplate.map(broker());
13185  // Protect against concurrent changes to the boilerplate object by checking
13186  // for an identical value at the end of the compilation.
13188  boilerplate, HeapObject::kMapOffset, boilerplate_map);
13189  {
13190  compiler::OptionalMapRef current_boilerplate_map =
13191  boilerplate.map_direct_read(broker());
13192  if (!current_boilerplate_map.has_value() ||
13193  !current_boilerplate_map->equals(boilerplate_map)) {
13194  // TODO(leszeks): Emit an eager deopt for this case, so that we can
13195  // re-learn the boilerplate. This will be easier once we get rid of the
13196  // two-pass approach, since we'll be able to create the eager deopt here
13197  // and return a ReduceResult::DoneWithAbort().
13198  return {};
13199  }
13200  }
13201 
13202  // Bail out if the boilerplate map has been deprecated. The map could of
13203  // course be deprecated at some point after the line below, but it's not a
13204  // correctness issue -- it only means the literal won't be created with the
13205  // most up to date map(s).
13206  if (boilerplate_map.is_deprecated()) return {};
13207 
13208  // We currently only support in-object properties.
13209  if (boilerplate.map(broker()).elements_kind() == DICTIONARY_ELEMENTS ||
13210  boilerplate.map(broker()).is_dictionary_map() ||
13211  !boilerplate.raw_properties_or_hash(broker()).has_value()) {
13212  return {};
13213  }
13214  {
13215  compiler::ObjectRef properties =
13216  *boilerplate.raw_properties_or_hash(broker());
13217  bool const empty =
13218  properties.IsSmi() ||
13219  properties.equals(MakeRef(
13220  broker(), local_isolate()->factory()->empty_fixed_array())) ||
13221  properties.equals(MakeRef(
13222  broker(),
13223  Cast<Object>(local_isolate()->factory()->empty_property_array())));
13224  if (!empty) return {};
13225  }
13226 
13227  compiler::OptionalFixedArrayBaseRef maybe_elements =
13228  boilerplate.elements(broker(), kRelaxedLoad);
13229  if (!maybe_elements.has_value()) return {};
 13230  compiler::FixedArrayBaseRef boilerplate_elements = maybe_elements.value();
 13231  broker()->dependencies()->DependOnObjectSlotValue(
 13232  boilerplate, JSObject::kElementsOffset, boilerplate_elements);
13233  const uint32_t elements_length = boilerplate_elements.length();
13234 
13235  VirtualObject* fast_literal;
13236  if (boilerplate_map.IsJSArrayMap()) {
13237  MaybeReduceResult fast_array = CreateJSArray(
13238  boilerplate_map, boilerplate_map.instance_size(),
13239  GetConstant(boilerplate.AsJSArray().GetBoilerplateLength(broker())));
13240  CHECK(fast_array.HasValue());
13241  fast_literal = fast_array.value()->Cast<VirtualObject>();
13242  } else {
13243  fast_literal = CreateJSObject(boilerplate_map);
13244  }
13245 
13246  int inobject_properties = boilerplate_map.GetInObjectProperties();
13247 
13248  // Compute the in-object properties to store first.
13249  int index = 0;
13250  for (InternalIndex i :
13251  InternalIndex::Range(boilerplate_map.NumberOfOwnDescriptors())) {
13252  PropertyDetails const property_details =
13253  boilerplate_map.GetPropertyDetails(broker(), i);
13254  if (property_details.location() != PropertyLocation::kField) continue;
13255  DCHECK_EQ(PropertyKind::kData, property_details.kind());
13256  if ((*max_properties)-- == 0) return {};
13257 
13258  int offset = boilerplate_map.GetInObjectPropertyOffset(index);
13259 #ifdef DEBUG
13260  FieldIndex field_index =
13261  FieldIndex::ForDetails(*boilerplate_map.object(), property_details);
13262  DCHECK(field_index.is_inobject());
13263  DCHECK_EQ(index, field_index.property_index());
13264  DCHECK_EQ(field_index.offset(), offset);
13265 #endif
13266 
13267  // The index is derived from the in-sandbox `NumberOfOwnDescriptors` value,
13268  // but the access is out-of-sandbox fast_literal fields.
13269  SBXCHECK_LT(index, inobject_properties);
13270 
13271  // Note: the use of RawInobjectPropertyAt (vs. the higher-level
13272  // GetOwnFastConstantDataProperty) here is necessary, since the underlying
13273  // value may be `uninitialized`, which the latter explicitly does not
13274  // support.
13275  compiler::OptionalObjectRef maybe_boilerplate_value =
13276  boilerplate.RawInobjectPropertyAt(
 13277  broker(),
 13278  FieldIndex::ForInObjectOffset(offset, FieldIndex::kTagged));
 13279  if (!maybe_boilerplate_value.has_value()) return {};
13280 
13281  // Note: We don't need to take a compilation dependency verifying the value
13282  // of `boilerplate_value`, since boilerplate properties are constant after
13283  // initialization modulo map migration. We protect against concurrent map
13284  // migrations (other than elements kind transition, which don't affect us)
13285  // via the boilerplate_migration_access lock.
13286  compiler::ObjectRef boilerplate_value = maybe_boilerplate_value.value();
13287 
13288  if (boilerplate_value.IsJSObject()) {
13289  compiler::JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
13290  std::optional<VirtualObject*> maybe_object_value =
13291  TryReadBoilerplateForFastLiteral(boilerplate_object, allocation,
13292  max_depth - 1, max_properties);
13293  if (!maybe_object_value.has_value()) return {};
13294  fast_literal->set(offset, maybe_object_value.value());
13295  } else if (property_details.representation().IsDouble()) {
 13296  fast_literal->set(offset,
 13297  CreateHeapNumber(Float64::FromBits(
 13298  boilerplate_value.AsHeapNumber().value_as_bits())));
13299  } else {
13300  // It's fine to store the 'uninitialized' Oddball into a Smi field since
13301  // it will get overwritten anyway.
13302  DCHECK_IMPLIES(property_details.representation().IsSmi() &&
13303  !boilerplate_value.IsSmi(),
13304  IsUninitialized(*boilerplate_value.object()));
13305  fast_literal->set(offset, GetConstant(boilerplate_value));
13306  }
13307  index++;
13308  }
13309 
13310  // Fill slack at the end of the boilerplate object with filler maps.
 13311  for (; index < inobject_properties; ++index) {
 13312  DCHECK(!V8_MAP_PACKING_BOOL);
 13313  // TODO(wenyuzhao): Fix incorrect MachineType when V8_MAP_PACKING is
13314  // enabled.
13315  int offset = boilerplate_map.GetInObjectPropertyOffset(index);
13316  fast_literal->set(offset, GetRootConstant(RootIndex::kOnePointerFillerMap));
13317  }
13318 
13319  DCHECK_EQ(JSObject::kElementsOffset, JSArray::kElementsOffset);
13320  // Empty or copy-on-write elements just store a constant.
13321  compiler::MapRef elements_map = boilerplate_elements.map(broker());
13322  // Protect against concurrent changes to the boilerplate object by checking
 13323  // for an identical value at the end of the compilation.
 13324  broker()->dependencies()->DependOnObjectSlotValue(
 13325  boilerplate_elements, HeapObject::kMapOffset, elements_map);
13326  if (boilerplate_elements.length() == 0 ||
13327  elements_map.IsFixedCowArrayMap(broker())) {
13328  if (allocation == AllocationType::kOld &&
13329  !boilerplate.IsElementsTenured(boilerplate_elements)) {
13330  return {};
13331  }
13332  fast_literal->set(JSObject::kElementsOffset,
13333  GetConstant(boilerplate_elements));
13334  } else {
13335  // Compute the elements to store first (might have effects).
13336  if (boilerplate_elements.IsFixedDoubleArray()) {
13337  int const size = FixedDoubleArray::SizeFor(elements_length);
13338  if (size > kMaxRegularHeapObjectSize) return {};
13339  fast_literal->set(
13340  JSObject::kElementsOffset,
13341  CreateDoubleFixedArray(elements_length,
13342  boilerplate_elements.AsFixedDoubleArray()));
13343  } else {
13344  int const size = FixedArray::SizeFor(elements_length);
13345  if (size > kMaxRegularHeapObjectSize) return {};
13346  VirtualObject* elements =
13347  CreateFixedArray(broker()->fixed_array_map(), elements_length);
13348  compiler::FixedArrayRef boilerplate_elements_as_fixed_array =
13349  boilerplate_elements.AsFixedArray();
13350  for (uint32_t i = 0; i < elements_length; ++i) {
13351  if ((*max_properties)-- == 0) return {};
13352  compiler::OptionalObjectRef element_value =
13353  boilerplate_elements_as_fixed_array.TryGet(broker(), i);
13354  if (!element_value.has_value()) return {};
13355  if (element_value->IsJSObject()) {
13356  std::optional<VirtualObject*> object =
13357  TryReadBoilerplateForFastLiteral(element_value->AsJSObject(),
13358  allocation, max_depth - 1,
13359  max_properties);
13360  if (!object.has_value()) return {};
13361  elements->set(FixedArray::OffsetOfElementAt(i), *object);
13362  } else {
13363  elements->set(FixedArray::OffsetOfElementAt(i),
13364  GetConstant(*element_value));
13365  }
13366  }
13367 
13368  fast_literal->set(JSObject::kElementsOffset, elements);
13369  }
13370  }
13371 
13372  return fast_literal;
13373 }
#define SBXCHECK_LT(lhs, rhs)
Definition: check.h:59
static FieldIndex ForDetails(Tagged< Map > map, PropertyDetails details)
static FieldIndex ForInObjectOffset(int offset, Encoding encoding)
void DependOnObjectSlotValue(HeapObjectRef object, int offset, ObjectRef value)
VirtualObject * CreateHeapNumber(Float64 value)
VirtualObject * CreateJSObject(compiler::MapRef map)
VirtualObject * CreateDoubleFixedArray(uint32_t elements_length, compiler::FixedDoubleArrayRef elements)
Range(V< T >, V< T >, V< T >) -> Range< T >
ref_traits< T >::ref_type MakeRef(JSHeapBroker *broker, Tagged< T > object) requires(is_subtype_v< T

References broker(), v8::internal::maglev::NodeBase::Cast(), CHECK, v8::internal::DCHECK(), DCHECK_EQ, DCHECK_GE, DCHECK_IMPLIES, v8::internal::DICTIONARY_ELEMENTS, v8::internal::compiler::JSObjectRef::elements(), v8::internal::FieldIndex::ForDetails(), v8::internal::FieldIndex::ForInObjectOffset(), v8::internal::Float64::FromBits(), v8::internal::compiler::MapRef::GetInObjectProperties(), v8::internal::compiler::MapRef::GetInObjectPropertyOffset(), v8::internal::compiler::MapRef::GetPropertyDetails(), v8::internal::maglev::MaybeReduceResult::HasValue(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::index, v8::internal::compiler::MapRef::instance_size(), v8::internal::compiler::MapRef::is_deprecated(), v8::internal::FieldIndex::is_inobject(), v8::internal::Representation::IsDouble(), v8::internal::compiler::JSObjectRef::IsElementsTenured(), v8::internal::compiler::MapRef::IsFixedCowArrayMap(), v8::internal::Representation::IsSmi(), v8::internal::kData, v8::internal::kField, v8::internal::PropertyDetails::kind(), v8::internal::HeapObject::kMapOffset, v8::internal::kMaxRegularHeapObjectSize, v8::internal::kOld, v8::kRelaxedLoad, v8::internal::FieldIndex::kTagged, v8::internal::compiler::FixedArrayBaseRef::length(), v8::internal::PropertyDetails::location(), v8::internal::compiler::MakeRef(), v8::internal::compiler::HeapObjectRef::map(), v8::internal::compiler::HeapObjectRef::map_direct_read(), v8::internal::compiler::MapRef::NumberOfOwnDescriptors(), v8::internal::compiler::MapRef::object(), v8::internal::FieldIndex::offset(), v8::internal::TaggedArrayBase< FixedArray, TaggedArrayShape >::OffsetOfElementAt(), v8::internal::FieldIndex::property_index(), v8::internal::compiler::JSObjectRef::raw_properties_or_hash(), v8::internal::compiler::JSObjectRef::RawInobjectPropertyAt(), v8::internal::PropertyDetails::representation(), SBXCHECK_LT, v8::internal::maglev::VirtualObject::set(), size(), v8::internal::TaggedArrayBase< FixedArray, 
TaggedArrayShape >::SizeFor(), v8::internal::PrimitiveArrayBase< FixedDoubleArray, FixedDoubleArrayShape >::SizeFor(), v8::internal::compiler::FixedArrayRef::TryGet(), V8_MAP_PACKING_BOOL, and v8::internal::maglev::MaybeReduceResult::value().

+ Here is the call graph for this function:

◆ TryReduceArrayIteratingBuiltin()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceArrayIteratingBuiltin ( const char *  name,
compiler::JSFunctionRef  target,
CallArguments args,
GetDeoptScopeCallback  get_eager_deopt_scope,
GetDeoptScopeCallback  get_lazy_deopt_scope,
const std::optional< InitialCallback > &  initial_callback = {},
const std::optional< ProcessElementCallback > &  process_element_callback = {} 
)
private

Definition at line 8888 of file maglev-graph-builder.cc.

8893  {
8894  DCHECK_EQ(initial_callback.has_value(), process_element_callback.has_value());
8895 
8896  if (!CanSpeculateCall()) return {};
8897 
8898  ValueNode* receiver = args.receiver();
8899  if (!receiver) return {};
8900 
8901  if (args.count() < 1) {
8902  if (v8_flags.trace_maglev_graph_building) {
8903  std::cout << " ! Failed to reduce " << name << " - not enough arguments"
8904  << std::endl;
8905  }
8906  return {};
8907  }
8908 
8909  auto node_info = known_node_aspects().TryGetInfoFor(receiver);
8910  if (!node_info || !node_info->possible_maps_are_known()) {
8911  if (v8_flags.trace_maglev_graph_building) {
8912  std::cout << " ! Failed to reduce " << name
8913  << " - receiver map is unknown" << std::endl;
8914  }
8915  return {};
8916  }
8917 
8918  ElementsKind elements_kind;
8919  if (!CanInlineArrayIteratingBuiltin(broker(), node_info->possible_maps(),
8920  &elements_kind)) {
8921  if (v8_flags.trace_maglev_graph_building) {
8922  std::cout << " ! Failed to reduce " << name
8923  << " - doesn't support fast array iteration or incompatible"
8924  << " maps" << std::endl;
8925  }
8926  return {};
8927  }
8928 
8929  // TODO(leszeks): May only be needed for holey elements kinds.
8930  if (!broker()->dependencies()->DependOnNoElementsProtector()) {
8931  if (v8_flags.trace_maglev_graph_building) {
8932  std::cout << " ! Failed to reduce " << name
8933  << " - invalidated no elements protector" << std::endl;
8934  }
8935  return {};
8936  }
8937 
8938  ValueNode* callback = args[0];
8939  if (!callback->is_tagged()) {
8940  if (v8_flags.trace_maglev_graph_building) {
8941  std::cout << " ! Failed to reduce " << name
8942  << " - callback is untagged value" << std::endl;
8943  }
8944  return {};
8945  }
8946 
8947  ValueNode* this_arg =
8948  args.count() > 1 ? args[1] : GetRootConstant(RootIndex::kUndefinedValue);
8949 
8950  ValueNode* original_length = BuildLoadJSArrayLength(receiver);
8951 
8952  if (initial_callback) {
8953  RETURN_IF_ABORT((*initial_callback)(original_length));
8954  }
8955 
8956  // Elide the callable check if the node is known callable.
8957  EnsureType(callback, NodeType::kCallable, [&](NodeType old_type) {
8958  // ThrowIfNotCallable is wrapped in a lazy_deopt_scope to make sure the
8959  // exception has the right call stack.
8960  const DeoptFrameScope& lazy_deopt_scope = get_lazy_deopt_scope(
8961  target, receiver, callback, this_arg, GetSmiConstant(0),
8962  GetSmiConstant(0), original_length);
8963  AddNewNode<ThrowIfNotCallable>({callback});
8964  });
8965 
8966  ValueNode* original_length_int32 = GetInt32(original_length);
8967 
 8968  // Remember the receiver map set before entering the loop of the call.
8969  bool receiver_maps_were_unstable = node_info->possible_maps_are_unstable();
8970  PossibleMaps receiver_maps_before_loop(node_info->possible_maps());
8971 
8972  // Create a sub graph builder with two variables (index and length).
8973  MaglevSubGraphBuilder sub_builder(this, 2);
8974  MaglevSubGraphBuilder::Variable var_index(0);
8975  MaglevSubGraphBuilder::Variable var_length(1);
8976 
8977  MaglevSubGraphBuilder::Label loop_end(&sub_builder, 1);
8978 
8979  // ```
8980  // index = 0
8981  // bind loop_header
8982  // ```
8983  sub_builder.set(var_index, GetSmiConstant(0));
8984  sub_builder.set(var_length, original_length);
8985  MaglevSubGraphBuilder::LoopLabel loop_header =
8986  sub_builder.BeginLoop({&var_index, &var_length});
8987 
8988  // Reset known state that is cleared by BeginLoop, but is known to be true on
8989  // the first iteration, and will be re-checked at the end of the loop.
8990 
8991  // Reset the known receiver maps if necessary.
8992  if (receiver_maps_were_unstable) {
8993  node_info->SetPossibleMaps(receiver_maps_before_loop,
8994  receiver_maps_were_unstable,
8995  // Node type is monotonic, no need to reset it.
8996  NodeType::kUnknown, broker());
8998  } else {
8999  DCHECK_EQ(node_info->possible_maps().size(),
9000  receiver_maps_before_loop.size());
9001  }
9002  // Reset the cached loaded array length to the length var.
9003  RecordKnownProperty(receiver, broker()->length_string(),
9004  sub_builder.get(var_length), false,
9006 
9007  // ```
9008  // if (index_int32 < length_int32)
9009  // fallthrough
9010  // else
9011  // goto end
9012  // ```
9013  Phi* index_tagged = sub_builder.get(var_index)->Cast<Phi>();
9014  EnsureType(index_tagged, NodeType::kSmi);
9015  ValueNode* index_int32 = GetInt32(index_tagged);
9016 
9017  sub_builder.GotoIfFalse<BranchIfInt32Compare>(
9018  &loop_end, {index_int32, original_length_int32}, Operation::kLessThan);
9019 
9020  // ```
9021  // next_index = index + 1
9022  // ```
9023  ValueNode* next_index_int32 = nullptr;
9024  {
9025  // Eager deopt scope for index increment overflow.
9026  // TODO(pthier): In practice this increment can never overflow, as the max
9027  // possible array length is less than int32 max value. Add a new
9028  // Int32Increment that asserts no overflow instead of deopting.
9029  DeoptFrameScope eager_deopt_scope =
9030  get_eager_deopt_scope(target, receiver, callback, this_arg, index_int32,
9031  index_int32, original_length);
9032  next_index_int32 = AddNewNode<Int32IncrementWithOverflow>({index_int32});
9033  EnsureType(next_index_int32, NodeType::kSmi);
9034  }
9035  // TODO(leszeks): Assert Smi.
9036 
9037  // ```
9038  // element = array.elements[index]
9039  // ```
9040  ValueNode* elements = BuildLoadElements(receiver);
9041  ValueNode* element;
9042  if (IsDoubleElementsKind(elements_kind)) {
9043  element = BuildLoadFixedDoubleArrayElement(elements, index_int32);
9044  } else {
9045  element = BuildLoadFixedArrayElement(elements, index_int32);
9046  }
9047 
9048  std::optional<MaglevSubGraphBuilder::Label> skip_call;
9049  if (IsHoleyElementsKind(elements_kind)) {
9050  // ```
9051  // if (element is hole) goto skip_call
9052  // ```
9053  skip_call.emplace(
9054  &sub_builder, 2,
9055  std::initializer_list<MaglevSubGraphBuilder::Variable*>{&var_length});
9056  if (elements_kind == HOLEY_DOUBLE_ELEMENTS) {
9057  sub_builder.GotoIfTrue<BranchIfFloat64IsHole>(&*skip_call, {element});
9058  } else {
9059  sub_builder.GotoIfTrue<BranchIfRootConstant>(&*skip_call, {element},
9060  RootIndex::kTheHoleValue);
9061  }
9062  }
9063 
9064  // ```
9065  // callback(this_arg, element, array)
9066  // ```
9067  MaybeReduceResult result;
9068  {
9069  const DeoptFrameScope& lazy_deopt_scope =
9070  get_lazy_deopt_scope(target, receiver, callback, this_arg, index_int32,
9071  next_index_int32, original_length);
9072  CallArguments call_args =
9073  args.count() < 2
9074  ? CallArguments(ConvertReceiverMode::kNullOrUndefined,
9075  {element, index_tagged, receiver})
9076  : CallArguments(ConvertReceiverMode::kAny,
9077  {this_arg, element, index_tagged, receiver});
9078 
9079  SaveCallSpeculationScope saved(this);
9080  result = ReduceCall(callback, call_args, saved.value());
9081  }
9082 
9083  // ```
9084  // index = next_index
9085  // jump loop_header
9086  // ```
9087  DCHECK_IMPLIES(result.IsDoneWithAbort(), current_block_ == nullptr);
9088 
9089  // No need to finish the loop if this code is unreachable.
9090  if (!result.IsDoneWithAbort()) {
9091  if (process_element_callback) {
9092  ValueNode* value = result.value();
9093  (*process_element_callback)(index_int32, value);
9094  }
9095 
9096  // If any of the receiver's maps were unstable maps, we have to re-check the
9097  // maps on each iteration, in case the callback changed them. That said, we
9098  // know that the maps are valid on the first iteration, so we can rotate the
9099  // check to _after_ the callback, and then elide it if the receiver maps are
9100  // still known to be valid (i.e. the known maps after the call are contained
9101  // inside the known maps before the call).
9102  bool recheck_maps_after_call = receiver_maps_were_unstable;
9103  if (recheck_maps_after_call) {
9104  // No need to recheck maps if there are known maps...
9105  if (auto receiver_info_after_call =
9106  known_node_aspects().TryGetInfoFor(receiver)) {
9107  // ... and those known maps are equal to, or a subset of, the maps
9108  // before the call.
9109  if (receiver_info_after_call &&
9110  receiver_info_after_call->possible_maps_are_known()) {
9111  recheck_maps_after_call = !receiver_maps_before_loop.contains(
9112  receiver_info_after_call->possible_maps());
9113  }
9114  }
9115  }
9116 
9117  // Make sure to finish the loop if we eager deopt in the map check or index
9118  // check.
9119  const DeoptFrameScope& eager_deopt_scope =
9120  get_eager_deopt_scope(target, receiver, callback, this_arg, index_int32,
9121  next_index_int32, original_length);
9122 
9123  if (recheck_maps_after_call) {
9124  // Build the CheckMap manually, since we're doing it with already known
9125  // maps rather than feedback, and we don't need to update known node
9126  // aspects or types since we're at the end of the loop anyway.
9127  bool emit_check_with_migration = std::any_of(
9128  receiver_maps_before_loop.begin(), receiver_maps_before_loop.end(),
9129  [](compiler::MapRef map) { return map.is_migration_target(); });
9130  if (emit_check_with_migration) {
9131  AddNewNode<CheckMapsWithMigration>({receiver},
9132  receiver_maps_before_loop,
9134  } else {
9135  AddNewNode<CheckMaps>({receiver}, receiver_maps_before_loop,
9137  }
9138  }
9139 
9140  // Check if the index is still in bounds, in case the callback changed the
9141  // length.
9142  ValueNode* current_length = BuildLoadJSArrayLength(receiver);
9143  sub_builder.set(var_length, current_length);
9144 
9145  // Reference compare the loaded length against the original length. If this
9146  // is the same value node, then we didn't have any side effects and didn't
9147  // clear the cached length.
9148  if (current_length != original_length) {
9150  TryBuildCheckInt32Condition(original_length_int32, current_length,
9152  DeoptimizeReason::kArrayLengthChanged));
9153  }
9154  }
9155 
9156  if (skip_call.has_value()) {
9157  sub_builder.GotoOrTrim(&*skip_call);
9158  sub_builder.Bind(&*skip_call);
9159  }
9160 
9161  sub_builder.set(var_index, next_index_int32);
9162  sub_builder.EndLoop(&loop_header);
9163 
9164  // ```
9165  // bind end
9166  // ```
9167  sub_builder.Bind(&loop_end);
9168 
9169  return ReduceResult::Done();
9170 }
bool any_of(const C &container, const P &predicate)
bool CanInlineArrayIteratingBuiltin(compiler::JSHeapBroker *broker, const PossibleMaps &maps, ElementsKind *kind_return)

References v8::base::any_of(), v8::base::args, v8::internal::ZoneCompactSet< T >::begin(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::BeginLoop(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(), broker(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::CanInlineArrayIteratingBuiltin(), v8::internal::maglev::NodeBase::Cast(), v8::internal::ZoneCompactSet< T >::contains(), DCHECK_EQ, DCHECK_IMPLIES, v8::internal::ZoneCompactSet< T >::end(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::EndLoop(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::get(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfFalse(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfTrue(), v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::GotoOrTrim(), v8::internal::HOLEY_DOUBLE_ELEMENTS, v8::internal::maglev::ValueNode::is_tagged(), v8::internal::IsDoubleElementsKind(), v8::internal::IsHoleyElementsKind(), v8::internal::kAny, v8::internal::kLessThan, v8::internal::compiler::kLoad, v8::internal::kNullOrUndefined, v8::internal::compiler::kSmi, v8::internal::kUnsignedLessThanEqual, v8::internal::name, v8::base::internal::result, RETURN_IF_ABORT, v8::internal::maglev::MaglevGraphBuilder::MaglevSubGraphBuilder::set(), v8::internal::ZoneCompactSet< T >::size(), v8::internal::v8_flags, and v8::internal::value.

+ Here is the call graph for this function:

◆ TryReduceBuiltin()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceBuiltin ( compiler::JSFunctionRef  target,
compiler::SharedFunctionInfoRef  shared,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 10842 of file maglev-graph-builder.cc.

10844  {
10845  if (args.mode() != CallArguments::kDefault) {
10846  // TODO(victorgomes): Maybe inline the spread stub? Or call known function
10847  // directly if arguments list is an array.
10848  return {};
10849  }
10850  SaveCallSpeculationScope speculate(this, feedback_source);
10851  if (!shared.HasBuiltinId()) return {};
10852  if (v8_flags.trace_maglev_graph_building) {
10853  std::cout << " ! Trying to reduce builtin "
10854  << Builtins::name(shared.builtin_id()) << std::endl;
10855  }
10856  switch (shared.builtin_id()) {
10857 #define CASE(Name, ...) \
10858  case Builtin::k##Name: \
10859  return TryReduce##Name(target, args);
10861 #undef CASE
10862  default:
10863  // TODO(v8:7700): Inline more builtins.
10864  return {};
10865  }
10866 }
static V8_EXPORT_PRIVATE const char * name(Builtin builtin)
Definition: builtins.cc:226
#define MAGLEV_REDUCED_BUILTIN(V)

References v8::base::args, v8::internal::compiler::SharedFunctionInfoRef::builtin_id(), CASE, MAGLEV_REDUCED_BUILTIN, v8::internal::Builtins::name(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ TryReduceCallForApiFunction()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceCallForApiFunction ( compiler::FunctionTemplateInfoRef  api_callback,
compiler::OptionalSharedFunctionInfoRef  maybe_shared,
CallArguments args 
)
private

Definition at line 10981 of file maglev-graph-builder.cc.

10983  {
10984  if (args.mode() != CallArguments::kDefault) {
10985  // TODO(victorgomes): Maybe inline the spread stub? Or call known function
10986  // directly if arguments list is an array.
10987  return {};
10988  }
10989  // Check if the function has an associated C++ code to execute.
10990  compiler::OptionalObjectRef maybe_callback_data =
10991  api_callback.callback_data(broker());
10992  if (!maybe_callback_data.has_value()) {
10993  // TODO(ishell): consider generating "return undefined" for empty function
10994  // instead of failing.
10995  return {};
10996  }
10997 
10999  ValueNode* receiver;
11000  if (maybe_shared.has_value()) {
11001  receiver = GetConvertReceiver(maybe_shared.value(), args);
11002  } else {
11003  receiver = args.receiver();
11004  CHECK_NOT_NULL(receiver);
11005  }
11006 
11009  ? (v8_flags.maglev_inline_api_calls
11013 
11014  return AddNewNode<CallKnownApiFunction>(
11015  input_count,
11016  [&](CallKnownApiFunction* call) {
11017  for (int i = 0; i < static_cast<int>(args.count()); i++) {
11018  call->set_arg(i, GetTaggedValue(args[i]));
11019  }
11020  },
11021  mode, api_callback, GetTaggedValue(GetContext()),
11022  GetTaggedValue(receiver));
11023 }

References v8::base::args, broker(), v8::internal::compiler::FunctionTemplateInfoRef::callback_data(), CHECK_NOT_NULL, v8::internal::compiler::turboshaft::detail::input_count(), mode(), v8::internal::maglev::CallKnownApiFunction::set_arg(), and v8::internal::v8_flags.

Referenced by TryBuildPropertyGetterCall(), and TryBuildPropertySetterCall().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryReduceCallForConstant()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceCallForConstant ( compiler::JSFunctionRef  target,
CallArguments args,
const compiler::FeedbackSource feedback_source = compiler::FeedbackSource() 
)
private

Definition at line 11338 of file maglev-graph-builder.cc.

11340  {
11341  if (args.mode() != CallArguments::kDefault) {
11342  // TODO(victorgomes): Maybe inline the spread stub? Or call known function
11343  // directly if arguments list is an array.
11344  return {};
11345  }
11346  compiler::SharedFunctionInfoRef shared = target.shared(broker());
11347  ValueNode* target_node = GetConstant(target);
11348  // Do not reduce calls to functions with break points.
11349  if (!shared.HasBreakInfo(broker())) {
11350  if (IsClassConstructor(shared.kind())) {
11351  // If we have a class constructor, we should raise an exception.
11352  return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
11353  {target_node});
11354  }
11355  DCHECK(IsCallable(*target.object()));
11356  RETURN_IF_DONE(TryReduceBuiltin(target, shared, args, feedback_source));
11358  target, GetRootConstant(RootIndex::kUndefinedValue), args,
11359  feedback_source));
11360  }
11361  return BuildGenericCall(target_node, Call::TargetType::kJSFunction, args);
11362 }
MaybeReduceResult TryReduceBuiltin(compiler::JSFunctionRef target, compiler::SharedFunctionInfoRef shared, CallArguments &args, const compiler::FeedbackSource &feedback_source)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::compiler::SharedFunctionInfoRef::HasBreakInfo(), v8::internal::IsClassConstructor(), v8::internal::compiler::JSFunctionRef::object(), RETURN_IF_DONE, and v8::internal::compiler::JSFunctionRef::shared().

Referenced by TryBuildPropertyGetterCall(), and TryBuildPropertySetterCall().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryReduceCallForNewClosure()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceCallForNewClosure ( ValueNode target_node,
ValueNode target_context,
compiler::SharedFunctionInfoRef  shared,
compiler::FeedbackCellRef  feedback_cell,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11431 of file maglev-graph-builder.cc.

11438  {
11439  // Do not reduce calls to functions with break points.
11440  if (args.mode() != CallArguments::kDefault) {
11441  // TODO(victorgomes): Maybe inline the spread stub? Or call known function
11442  // directly if arguments list is an array.
11443  return {};
11444  }
11445  if (!shared.HasBreakInfo(broker())) {
11446  if (IsClassConstructor(shared.kind())) {
11447  // If we have a class constructor, we should raise an exception.
11448  return BuildCallRuntime(Runtime::kThrowConstructorNonCallableError,
11449  {target_node});
11450  }
11452  target_context, target_node,
11453  GetRootConstant(RootIndex::kUndefinedValue),
11454 #ifdef V8_ENABLE_LEAPTIERING
11455  dispatch_handle,
11456 #endif
11457  shared, feedback_cell, args, feedback_source));
11458  }
11459  return BuildGenericCall(target_node, Call::TargetType::kJSFunction, args);
11460 }

References v8::base::args, broker(), v8::internal::IsClassConstructor(), and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryReduceCallForTarget()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceCallForTarget ( ValueNode target_node,
compiler::JSFunctionRef  target,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11423 of file maglev-graph-builder.cc.

11425  {
11427  target_node, target, DeoptimizeReason::kWrongCallTarget));
11428  return TryReduceCallForConstant(target, args, feedback_source);
11429 }

References v8::base::args, and RETURN_IF_ABORT.

◆ TryReduceCompareEqualAgainstConstant()

template<Operation kOperation>
bool v8::internal::maglev::MaglevGraphBuilder::TryReduceCompareEqualAgainstConstant
private

Definition at line 3086 of file maglev-graph-builder.cc.

3086  {
3087  if (kOperation != Operation::kStrictEqual && kOperation != Operation::kEqual)
3088  return false;
3089 
3090  ValueNode* left = LoadRegister(0);
3091  ValueNode* right = GetAccumulator();
3092 
3093  ValueNode* other = right;
3094  compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(left);
3095  if (!maybe_constant) {
3096  maybe_constant = TryGetConstant(right);
3097  other = left;
3098  }
3099  if (!maybe_constant) return false;
3100 
3101  if (CheckType(other, NodeType::kBoolean)) {
3102  auto CompareOtherWith = [&](bool constant) {
3103  compiler::OptionalHeapObjectRef const_other = TryGetConstant(other);
3104  if (const_other) {
3105  auto bool_other = const_other->TryGetBooleanValue(broker());
3106  if (bool_other.has_value()) {
3107  SetAccumulator(GetBooleanConstant(constant == *bool_other));
3108  return;
3109  }
3110  }
3111  if (constant) {
3112  SetAccumulator(other);
3113  } else {
3114  SetAccumulator(AddNewNode<LogicalNot>({other}));
3115  }
3116  };
3117 
3118  if (maybe_constant.equals(broker_->true_value())) {
3119  CompareOtherWith(true);
3120  return true;
3121  } else if (maybe_constant.equals(broker_->false_value())) {
3122  CompareOtherWith(false);
3123  return true;
3124  } else if (kOperation == Operation::kEqual) {
3125  // For `bool == num` we can convert the actual comparison `ToNumber(bool)
3126  // == num` into `(num == 1) ? bool : ((num == 0) ? !bool : false)`,
3127  std::optional<double> val = {};
3128  if (maybe_constant.value().IsSmi()) {
3129  val = maybe_constant.value().AsSmi();
3130  } else if (maybe_constant.value().IsHeapNumber()) {
3131  val = maybe_constant.value().AsHeapNumber().value();
3132  }
3133  if (val) {
3134  if (*val == 0) {
3135  CompareOtherWith(false);
3136  } else if (*val == 1) {
3137  CompareOtherWith(true);
3138  } else {
3139  // The constant number is neither equal to `ToNumber(true)` nor
3140  // `ToNumber(false)`.
3142  }
3143  return true;
3144  }
3145  }
3146  }
3147 
3148  if (kOperation != Operation::kStrictEqual) return false;
3149 
3150  InstanceType type = maybe_constant.value().map(broker()).instance_type();
3152 
3153  // If the constant is the undefined value, we can compare it
3154  // against holey floats.
3155  if (maybe_constant->IsUndefined()) {
3156  ValueNode* holey_float = nullptr;
3157  if (left->properties().value_representation() ==
3159  holey_float = left;
3160  } else if (right->properties().value_representation() ==
3162  holey_float = right;
3163  }
3164  if (holey_float) {
3165  SetAccumulator(AddNewNode<HoleyFloat64IsHole>({holey_float}));
3166  return true;
3167  }
3168  }
3169 
3170  if (left->properties().value_representation() !=
3172  right->properties().value_representation() !=
3175  } else {
3176  SetAccumulator(BuildTaggedEqual(left, right));
3177  }
3178  return true;
3179 }
constexpr bool IsReferenceComparable(InstanceType instance_type)

References broker(), broker_, BuildTaggedEqual(), CheckType(), GetAccumulator(), GetBooleanConstant(), v8::internal::InstanceTypeChecker::IsReferenceComparable(), v8::internal::kEqual, v8::internal::maglev::kHoleyFloat64, v8::kOperation, v8::internal::maglev::kTagged, LoadRegister(), v8::internal::maglev::NodeBase::properties(), SetAccumulator(), TryGetConstant(), v8::internal::tracing::type, and v8::internal::maglev::OpProperties::value_representation().

+ Here is the call graph for this function:

◆ TryReduceConstantStringAt()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceConstantStringAt ( ValueNode object,
ValueNode index,
StringAtOOBMode  oob_mode 
)
private

Definition at line 9362 of file maglev-graph-builder.cc.

9363  {
9364  auto constant_receiver = TryGetConstant(receiver);
9365  if (!constant_receiver) return {};
9366  if (!constant_receiver->IsString()) {
9367  return EmitUnconditionalDeopt(DeoptimizeReason::kNotAString);
9368  }
9369  compiler::StringRef string = constant_receiver->AsString();
9370  auto maybe_constant_index = TryGetInt32Constant(index);
9371  if (!maybe_constant_index) return {};
9372  int32_t constant_index = *maybe_constant_index;
9373 
9374  if (static_cast<uint32_t>(constant_index) >= string.length()) {
9375  switch (oob_mode) {
9377  // For element access, a negative index triggers a named lookup rather
9378  // than an element lookup; when this is the case, we shouldn't be trying
9379  // to optimize an elements access at all, so deopt.
9380  if (constant_index < 0) {
9382  }
9383  // Otherwise, this is hole-like access, so guard against elements on the
9384  // prototype to return undefined.
9385  if (broker()->dependencies()->DependOnNoElementsProtector()) {
9386  return GetRootConstant(RootIndex::kUndefinedValue);
9387  }
9388  // If the no elements protector is invalidated, unconditionally deopt.
 9389  // This shouldn't trigger a deopt loop because the feedback should
9390  // transition to megamorphic.
9392  case StringAtOOBMode::kCharAt: {
9393  // OOB for charAt is always the empty string.
9394  return GetRootConstant(RootIndex::kempty_string);
9395  }
9396  }
9397  UNREACHABLE();
9398  }
9399 
9400  if (std::optional<uint16_t> value =
9401  string.GetChar(broker(), constant_index)) {
9403  }
9404  return {};
9405 }
MaybeReduceResult GetConstantSingleCharacterStringFromCode(uint16_t)

References broker(), v8::internal::index, v8::internal::wasm::anonymous_namespace{wasm-external-refs.cc}::kOutOfBounds, v8::internal::length, v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant(), v8::internal::UNREACHABLE(), and v8::internal::value.

Referenced by TryBuildElementAccessOnString().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryReduceConstruct()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceConstruct ( compiler::HeapObjectRef  feedback_target,
ValueNode target,
ValueNode new_target,
CallArguments args,
compiler::FeedbackSource feedback_source 
)
private

Definition at line 12399 of file maglev-graph-builder.cc.

12402  {
12403  DCHECK(!feedback_target.IsAllocationSite());
12404  if (!feedback_target.map(broker()).is_constructor()) {
12405  // TODO(victorgomes): Deal the case where target is not a constructor.
12406  return {};
12407  }
12408 
12409  if (target != new_target) return {};
12410 
12411  // TODO(v8:7700): Add fast paths for other callables.
12412  if (!feedback_target.IsJSFunction()) return {};
12413  compiler::JSFunctionRef function = feedback_target.AsJSFunction();
12414 
12415  // Do not inline constructors with break points.
12416  compiler::SharedFunctionInfoRef shared_function_info =
12417  function.shared(broker());
12418  if (shared_function_info.HasBreakInfo(broker())) {
12419  return {};
12420  }
12421 
12422  // Do not inline cross natives context.
12423  if (function.native_context(broker()) != broker()->target_native_context()) {
12424  return {};
12425  }
12426 
12427  if (args.mode() != CallArguments::kDefault) {
12428  // TODO(victorgomes): Maybe inline the spread stub? Or call known
12429  // function directly if arguments list is an array.
12430  return {};
12431  }
12432 
12433  if (shared_function_info.HasBuiltinId()) {
12434  RETURN_IF_DONE(TryReduceConstructBuiltin(function, shared_function_info,
12435  target, args));
12436  }
12437 
12438  if (shared_function_info.construct_as_builtin()) {
12439  // TODO(victorgomes): Inline JSBuiltinsConstructStub.
12440  return {};
12441  }
12442 
12443  return TryReduceConstructGeneric(function, shared_function_info, target,
12444  new_target, args, feedback_source);
12445 }
MaybeReduceResult TryReduceConstructGeneric(compiler::JSFunctionRef function, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, ValueNode *new_target, CallArguments &args, compiler::FeedbackSource &feedback_source)
MaybeReduceResult TryReduceConstructBuiltin(compiler::JSFunctionRef builtin, compiler::SharedFunctionInfoRef shared_function_info, ValueNode *target, CallArguments &args)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::compiler::SharedFunctionInfoRef::HasBreakInfo(), v8::internal::is_constructor, v8::internal::compiler::HeapObjectRef::map(), v8::internal::native_context, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryReduceConstructArrayConstructor()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceConstructArrayConstructor ( compiler::JSFunctionRef  array_function,
CallArguments args,
compiler::OptionalAllocationSiteRef  maybe_allocation_site = {} 
)
private

Definition at line 12213 of file maglev-graph-builder.cc.

12215  {
12216  ElementsKind elements_kind =
12217  maybe_allocation_site.has_value()
12218  ? maybe_allocation_site->GetElementsKind()
12219  : array_function.initial_map(broker()).elements_kind();
12220  // TODO(victorgomes): Support double elements array.
12221  if (IsDoubleElementsKind(elements_kind)) return {};
12222  DCHECK(IsFastElementsKind(elements_kind));
12223 
12224  std::optional<int> maybe_length;
12225  if (args.count() == 1) {
12226  maybe_length = TryGetInt32Constant(args[0]);
12227  }
12228  compiler::OptionalMapRef maybe_initial_map = GetArrayConstructorInitialMap(
12229  broker(), array_function, elements_kind, args.count(), maybe_length);
12230  if (!maybe_initial_map.has_value()) return {};
12231  compiler::MapRef initial_map = maybe_initial_map.value();
12232  compiler::SlackTrackingPrediction slack_tracking_prediction =
12234  array_function);
12235 
12236  // Tells whether we are protected by either the {site} or a
12237  // call speculation bit to do certain speculative optimizations.
12238  bool can_inline_call = false;
12239  AllocationType allocation_type = AllocationType::kYoung;
12240 
12241  if (maybe_allocation_site) {
12242  can_inline_call = maybe_allocation_site->CanInlineCall();
12243  allocation_type =
12244  broker()->dependencies()->DependOnPretenureMode(*maybe_allocation_site);
12245  broker()->dependencies()->DependOnElementsKind(*maybe_allocation_site);
12246  } else {
12247  can_inline_call = CanSpeculateCall();
12248  }
12249 
12250  if (args.count() == 0) {
12251  return BuildAndAllocateJSArray(
12254  slack_tracking_prediction, allocation_type);
12255  }
12256 
12257  if (maybe_length.has_value() && *maybe_length >= 0 &&
12258  *maybe_length < JSArray::kInitialMaxFastElementArray) {
12259  return BuildAndAllocateJSArray(initial_map, GetSmiConstant(*maybe_length),
12260  BuildElementsArray(*maybe_length),
12261  slack_tracking_prediction, allocation_type);
12262  }
12263 
12264  // TODO(victorgomes): If we know the argument cannot be a number, we should
12265  // allocate an array with one element.
12266  // We don't know anything about the length, so we rely on the allocation
12267  // site to avoid deopt loops.
12268  if (args.count() == 1 && can_inline_call) {
12269  return SelectReduction(
12270  [&](auto& builder) {
12271  return BuildBranchIfInt32Compare(builder,
12272  Operation::kGreaterThanOrEqual,
12273  args[0], GetInt32Constant(0));
12274  },
12275  [&] {
12276  ValueNode* elements =
12277  AddNewNode<AllocateElementsArray>({args[0]}, allocation_type);
12278  return BuildAndAllocateJSArray(initial_map, args[0], elements,
12279  slack_tracking_prediction,
12280  allocation_type);
12281  },
12282  [&] {
12283  ValueNode* error = GetSmiConstant(
12284  static_cast<int>(MessageTemplate::kInvalidArrayLength));
12285  return BuildCallRuntime(Runtime::kThrowRangeError, {error});
12286  });
12287  }
12288 
12289  // TODO(victorgomes): Support the constructor with argument count larger
12290  // than 1.
12291  return {};
12292 }
static const int kInitialMaxFastElementArray
Definition: js-array.h:148
static const int kPreallocatedArrayElements
Definition: js-array.h:126
ReduceResult BuildAndAllocateJSArray(compiler::MapRef map, ValueNode *length, ValueNode *elements, const compiler::SlackTrackingPrediction &slack_tracking_prediction, AllocationType allocation_type)
compiler::OptionalMapRef GetArrayConstructorInitialMap(compiler::JSHeapBroker *broker, compiler::JSFunctionRef array_function, ElementsKind elements_kind, size_t argc, std::optional< int > maybe_length)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::compiler::MapRef::elements_kind(), v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::GetArrayConstructorInitialMap(), v8::internal::compiler::initial_map, v8::internal::compiler::JSFunctionRef::initial_map(), v8::internal::IsDoubleElementsKind(), v8::internal::IsFastElementsKind(), v8::internal::JSArray::kInitialMaxFastElementArray, v8::internal::JSArray::kPreallocatedArrayElements, and v8::internal::kYoung.

+ Here is the call graph for this function:

◆ TryReduceConstructBuiltin()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceConstructBuiltin ( compiler::JSFunctionRef  builtin,
compiler::SharedFunctionInfoRef  shared_function_info,
ValueNode target,
CallArguments args 
)
private

Definition at line 12294 of file maglev-graph-builder.cc.

12297  {
12298  // TODO(victorgomes): specialize more known constants builtin targets.
12299  switch (shared_function_info.builtin_id()) {
12300  case Builtin::kArrayConstructor: {
12302  break;
12303  }
12304  case Builtin::kObjectConstructor: {
12305  // If no value is passed, we can immediately lower to a simple
12306  // constructor.
12307  if (args.count() == 0) {
12309  target, builtin, DeoptimizeReason::kWrongConstructor));
12310  ValueNode* result = BuildInlinedAllocation(CreateJSConstructor(builtin),
12312  return result;
12313  }
12314  break;
12315  }
12316  default:
12317  break;
12318  }
12319  return {};
12320 }

References v8::base::args, v8::internal::compiler::SharedFunctionInfoRef::builtin_id(), v8::internal::kYoung, v8::base::internal::result, RETURN_IF_ABORT, and RETURN_IF_DONE.

+ Here is the call graph for this function:

◆ TryReduceConstructGeneric()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceConstructGeneric ( compiler::JSFunctionRef  function,
compiler::SharedFunctionInfoRef  shared_function_info,
ValueNode target,
ValueNode new_target,
CallArguments args,
compiler::FeedbackSource feedback_source 
)
private

Definition at line 12322 of file maglev-graph-builder.cc.

12326  {
12328  target, function, DeoptimizeReason::kWrongConstructor));
12329 
12330  int construct_arg_count = static_cast<int>(args.count());
12331  base::Vector<ValueNode*> construct_arguments_without_receiver =
12332  zone()->AllocateVector<ValueNode*>(construct_arg_count);
12333  for (int i = 0; i < construct_arg_count; i++) {
12334  construct_arguments_without_receiver[i] = args[i];
12335  }
12336 
12337  if (IsDerivedConstructor(shared_function_info.kind())) {
12338  ValueNode* implicit_receiver = GetRootConstant(RootIndex::kTheHoleValue);
12339  args.set_receiver(implicit_receiver);
12340  ValueNode* call_result;
12341  {
12342  DeoptFrameScope construct(this, implicit_receiver);
12343  MaybeReduceResult result = TryBuildCallKnownJSFunction(
12344  function, new_target, args, feedback_source);
12346  call_result = result.value();
12347  }
12348  if (CheckType(call_result, NodeType::kJSReceiver)) return call_result;
12349  ValueNode* constant_node;
12350  if (compiler::OptionalHeapObjectRef maybe_constant =
12351  TryGetConstant(call_result, &constant_node)) {
12352  compiler::HeapObjectRef constant = maybe_constant.value();
12353  if (constant.IsJSReceiver()) return constant_node;
12354  }
12355  if (!call_result->properties().is_tagged()) {
12356  return BuildCallRuntime(Runtime::kThrowConstructorReturnedNonObject, {});
12357  }
12358  return AddNewNode<CheckDerivedConstructResult>({call_result});
12359  }
12360 
12361  // We do not create a construct stub lazy deopt frame, since
12362  // FastNewObject cannot fail if target is a JSFunction.
12363  ValueNode* implicit_receiver = nullptr;
12364  if (function.has_initial_map(broker())) {
12365  compiler::MapRef map = function.initial_map(broker());
12366  if (map.GetConstructor(broker()).equals(function)) {
12367  implicit_receiver = BuildInlinedAllocation(CreateJSConstructor(function),
12369  }
12370  }
12371  if (implicit_receiver == nullptr) {
12372  implicit_receiver = BuildCallBuiltin<Builtin::kFastNewObject>(
12373  {GetTaggedValue(target), GetTaggedValue(new_target)});
12374  }
12375  EnsureType(implicit_receiver, NodeType::kJSReceiver);
12376 
12377  args.set_receiver(implicit_receiver);
12378  ValueNode* call_result;
12379  {
12380  DeoptFrameScope construct(this, implicit_receiver);
12381  MaybeReduceResult result = TryBuildCallKnownJSFunction(
12382  function, new_target, args, feedback_source);
12384  call_result = result.value();
12385  }
12386  if (CheckType(call_result, NodeType::kJSReceiver)) return call_result;
12387  if (!call_result->properties().is_tagged()) return implicit_receiver;
12388  ValueNode* constant_node;
12389  if (compiler::OptionalHeapObjectRef maybe_constant =
12390  TryGetConstant(call_result, &constant_node)) {
12391  compiler::HeapObjectRef constant = maybe_constant.value();
12392  DCHECK(CheckType(implicit_receiver, NodeType::kJSReceiver));
12393  if (constant.IsJSReceiver()) return constant_node;
12394  return implicit_receiver;
12395  }
12396  return AddNewNode<CheckConstructResult>({call_result, implicit_receiver});
12397 }
bool IsDerivedConstructor(FunctionKind kind)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::compiler::MapRef::GetConstructor(), v8::internal::anonymous_namespace{json-stringifier.cc}::i, v8::internal::maglev::OpProperties::is_tagged(), v8::internal::IsDerivedConstructor(), v8::internal::kYoung, v8::internal::maglev::NodeBase::properties(), v8::base::internal::result, RETURN_IF_ABORT, and v8::internal::compiler::anonymous_namespace{constant-folding-reducer.cc}::TryGetConstant().

+ Here is the call graph for this function:

◆ TryReduceDatePrototypeGetField()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceDatePrototypeGetField ( compiler::JSFunctionRef  target,
CallArguments args,
JSDate::FieldIndex  field_index 
)
private

Definition at line 9789 of file maglev-graph-builder.cc.

9791  {
9792  DCHECK_LT(field_index, JSDate::kFirstUncachedField);
9793  if (!v8_flags.maglev_inline_date_accessors) return {};
9794  if (!CanSpeculateCall()) return {};
9795 
9796  if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) {
9797  if (v8_flags.trace_maglev_graph_building) {
9798  std::cout << " ! Failed to reduce Date.prototype.GetXXX - no receiver"
9799  << std::endl;
9800  }
9801  return {};
9802  }
9803 
9804  ValueNode* receiver = GetValueOrUndefined(args.receiver());
9805  const NodeInfo* receiver_info = known_node_aspects().TryGetInfoFor(receiver);
9806  // If the map set is not found, then we don't know anything about the map of
9807  // the receiver, so bail.
9808  if (!receiver_info || !receiver_info->possible_maps_are_known()) {
9809  if (v8_flags.trace_maglev_graph_building) {
9810  std::cout
9811  << " ! Failed to reduce Date.prototype.GetXXX - unknown receiver map"
9812  << std::endl;
9813  }
9814  return {};
9815  }
9816 
9817  const PossibleMaps& possible_receiver_maps = receiver_info->possible_maps();
9818  // If the set of possible maps is empty, then there's no possible map for this
9819  // receiver, therefore this path is unreachable at runtime. We're unlikely to
9820  // ever hit this case, BuildCheckMaps should already unconditionally deopt,
9821  // but check it in case another checking operation fails to statically
9822  // unconditionally deopt.
9823  if (possible_receiver_maps.is_empty()) {
9824  // TODO(leszeks): Add an unreachable assert here.
9825  return ReduceResult::DoneWithAbort();
9826  }
9827 
9828  if (!AllOfInstanceTypesAre(possible_receiver_maps, JS_DATE_TYPE)) {
9829  if (v8_flags.trace_maglev_graph_building) {
9830  std::cout
9831  << " ! Failed to reduce Date.prototype.GetXXX - wrong receiver maps "
9832  << std::endl;
9833  }
9834  return {};
9835  }
9836 
9837  if (!broker()
9838  ->dependencies()
9839  ->DependOnNoDateTimeConfigurationChangeProtector()) {
9840  if (v8_flags.trace_maglev_graph_building) {
9841  std::cout << " ! Failed to reduce Date.prototype.GetXXX - "
9842  "NoDateTimeConfigurationChangeProtector invalidated"
9843  << std::endl;
9844  }
9845  return {};
9846  }
9847 
9848  int field_offset = JSDate::kYearOffset + field_index * kTaggedSize;
9849  ValueNode* field_value = BuildLoadTaggedField(receiver, field_offset);
9850  return field_value;
9851 }
bool AllOfInstanceTypesAre(const PossibleMaps &maps, InstanceType type)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::AllOfInstanceTypesAre(), v8::base::args, broker(), DCHECK_LT, v8::internal::ZoneCompactSet< T >::is_empty(), v8::internal::JSDate::kFirstUncachedField, v8::internal::kNullOrUndefined, v8::internal::kTaggedSize, v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), and v8::internal::v8_flags.

+ Here is the call graph for this function:

◆ TryReduceFunctionPrototypeApplyCallWithReceiver()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceFunctionPrototypeApplyCallWithReceiver ( compiler::OptionalHeapObjectRef  maybe_receiver,
CallArguments args,
const compiler::FeedbackSource feedback_source 
)
private

Definition at line 11463 of file maglev-graph-builder.cc.

11465  {
11466  if (args.mode() != CallArguments::kDefault) return {};
11467 
11468  ValueNode* function = GetValueOrUndefined(args.receiver());
11469  if (maybe_receiver.has_value()) {
11471  function, maybe_receiver.value(), DeoptimizeReason::kWrongCallTarget));
11472  function = GetConstant(maybe_receiver.value());
11473  }
11474 
11475  SaveCallSpeculationScope saved(this);
11476  if (args.count() == 0) {
11477  CallArguments empty_args(ConvertReceiverMode::kNullOrUndefined);
11478  return ReduceCall(function, empty_args, feedback_source);
11479  }
11480  auto build_call_only_with_new_receiver = [&] {
11481  CallArguments new_args(ConvertReceiverMode::kAny, {args[0]});
11482  return ReduceCall(function, new_args, feedback_source);
11483  };
11484  if (args.count() == 1 || IsNullValue(args[1]) || IsUndefinedValue(args[1])) {
11485  return build_call_only_with_new_receiver();
11486  }
11487  auto build_call_with_array_like = [&] {
11488  CallArguments new_args(ConvertReceiverMode::kAny, {args[0], args[1]},
11490  return ReduceCallWithArrayLike(function, new_args, feedback_source);
11491  };
11492  if (!known_node_aspects().MayBeNullOrUndefined(broker(), args[1])) {
11493  return build_call_with_array_like();
11494  }
11495  return SelectReduction(
11496  [&](auto& builder) {
11497  return BuildBranchIfUndefinedOrNull(builder, args[1]);
11498  },
11499  build_call_only_with_new_receiver, build_call_with_array_like);
11500 }
BranchResult BuildBranchIfUndefinedOrNull(BranchBuilder &builder, ValueNode *node)
ReduceResult ReduceCallWithArrayLike(ValueNode *target_node, CallArguments &args, const compiler::FeedbackSource &feedback_source)

References v8::base::args, broker(), v8::internal::kAny, v8::internal::kNullOrUndefined, and RETURN_IF_ABORT.

+ Here is the call graph for this function:

◆ TryReduceGetIterator()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceGetIterator ( ValueNode receiver,
int  load_slot_index,
int  call_slot_index 
)
private

Definition at line 15486 of file maglev-graph-builder.cc.

15487  {
15488  // Load iterator method property.
15489  FeedbackSlot load_slot = FeedbackVector::ToSlot(load_slot_index);
15490  compiler::FeedbackSource load_feedback{feedback(), load_slot};
15491  compiler::NameRef iterator_symbol = broker()->iterator_symbol();
15492  ValueNode* iterator_method;
15493  {
15494  DeoptFrameScope deopt_continuation(
15495  this, Builtin::kGetIteratorWithFeedbackLazyDeoptContinuation, {},
15496  base::VectorOf<ValueNode*>({receiver, GetSmiConstant(call_slot_index),
15497  GetConstant(feedback())}));
15498  MaybeReduceResult result_load =
15499  TryBuildLoadNamedProperty(receiver, iterator_symbol, load_feedback);
15500  if (result_load.IsDoneWithAbort() || result_load.IsFail()) {
15501  return result_load;
15502  }
15503  DCHECK(result_load.IsDoneWithValue());
15504  iterator_method = result_load.value();
15505  }
15506  auto throw_iterator_error = [&] {
15507  return BuildCallRuntime(Runtime::kThrowIteratorError, {receiver});
15508  };
15509  if (!iterator_method->is_tagged()) {
15510  return throw_iterator_error();
15511  }
15512  auto throw_symbol_iterator_invalid = [&] {
15513  return BuildCallRuntime(Runtime::kThrowSymbolIteratorInvalid, {});
15514  };
15515  auto call_iterator_method = [&] {
15516  DeoptFrameScope deopt_continuation(
15517  this, Builtin::kCallIteratorWithFeedbackLazyDeoptContinuation);
15518 
15519  FeedbackSlot call_slot = FeedbackVector::ToSlot(call_slot_index);
15520  compiler::FeedbackSource call_feedback{feedback(), call_slot};
15521  CallArguments args(ConvertReceiverMode::kAny, {receiver});
15522  MaybeReduceResult result_call =
15523  ReduceCall(iterator_method, args, call_feedback);
15524 
15525  if (result_call.IsDoneWithAbort()) return result_call;
15526  DCHECK(result_call.IsDoneWithValue());
15527  return SelectReduction(
15528  [&](auto& builder) {
15529  return BuildBranchIfJSReceiver(builder, result_call.value());
15530  },
15531  [&] { return result_call; }, throw_symbol_iterator_invalid);
15532  };
15533  // Check if the iterator_method is undefined and call the method otherwise.
15534  return SelectReduction(
15535  [&](auto& builder) {
15536  return BuildBranchIfUndefined(builder, iterator_method);
15537  },
15538  throw_iterator_error, call_iterator_method);
15539 }
static FeedbackSlot ToSlot(intptr_t index)
BranchResult BuildBranchIfUndefined(BranchBuilder &builder, ValueNode *node)
BranchResult BuildBranchIfJSReceiver(BranchBuilder &builder, ValueNode *value)

References v8::base::args, broker(), v8::internal::DCHECK(), v8::internal::maglev::ValueNode::is_tagged(), v8::internal::maglev::MaybeReduceResult::IsDoneWithAbort(), v8::internal::maglev::MaybeReduceResult::IsDoneWithValue(), v8::internal::maglev::MaybeReduceResult::IsFail(), v8::internal::kAny, v8::internal::FeedbackVector::ToSlot(), and v8::internal::maglev::MaybeReduceResult::value().

+ Here is the call graph for this function:

◆ TryReduceGetProto()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceGetProto ( ValueNode object)
private

Definition at line 10560 of file maglev-graph-builder.cc.

10560  {
10561  NodeInfo* info = known_node_aspects().TryGetInfoFor(object);
10562  if (!info || !info->possible_maps_are_known()) {
10563  return {};
10564  }
10565  auto& possible_maps = info->possible_maps();
10566  if (possible_maps.is_empty()) {
10567  return ReduceResult::DoneWithAbort();
10568  }
10569  auto it = possible_maps.begin();
10570  compiler::MapRef map = *it;
10571  if (IsSpecialReceiverInstanceType(map.instance_type())) {
10572  return {};
10573  }
10574  DCHECK(!map.IsPrimitiveMap() && map.IsJSReceiverMap());
10575  compiler::HeapObjectRef proto = map.prototype(broker());
10576  ++it;
10577  for (; it != possible_maps.end(); ++it) {
10578  map = *it;
10579  if (IsSpecialReceiverInstanceType(map.instance_type()) ||
10580  !proto.equals(map.prototype(broker()))) {
10581  return {};
10582  }
10583  DCHECK(!map.IsPrimitiveMap() && map.IsJSReceiverMap());
10584  }
10585  return GetConstant(proto);
10586 }

References broker(), v8::internal::DCHECK(), v8::internal::compiler::MapRef::instance_type(), v8::internal::compiler::MapRef::IsPrimitiveMap(), v8::internal::IsSpecialReceiverInstanceType(), v8::internal::maglev::NodeInfo::possible_maps(), v8::internal::maglev::NodeInfo::possible_maps_are_known(), and v8::internal::compiler::MapRef::prototype().

+ Here is the call graph for this function:

◆ TryReduceTypeOf() [1/2]

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceTypeOf ( ValueNode value)
private

Definition at line 4079 of file maglev-graph-builder.cc.

4079  {
4080  return TryReduceTypeOf(value,
4081  [&](TypeOfLiteralFlag _, RootIndex idx) -> ValueNode* {
4082  return GetRootConstant(idx);
4083  });
4084 }
interpreter::TestTypeOfFlags::LiteralFlag TypeOfLiteralFlag
MaybeReduceResult TryReduceTypeOf(ValueNode *value, const Function &GetResult)
#define _

References _, GetRootConstant(), TryReduceTypeOf(), and v8::internal::value.

+ Here is the call graph for this function:

◆ TryReduceTypeOf() [2/2]

template<typename Function >
MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReduceTypeOf ( ValueNode value,
const Function GetResult 
)
private

Definition at line 4033 of file maglev-graph-builder.cc.

4034  {
4035  // Similar to TF, we assume that all undetectable receiver objects are also
4036  // callables. In practice, there is only one: document.all.
4037  switch (CheckTypes(
4038  value, {NodeType::kBoolean, NodeType::kNumber, NodeType::kString,
4039  NodeType::kSymbol, NodeType::kCallable, NodeType::kJSArray})) {
4040  case NodeType::kBoolean:
4041  return GetResult(TypeOfLiteralFlag::kBoolean, RootIndex::kboolean_string);
4042  case NodeType::kNumber:
4043  return GetResult(TypeOfLiteralFlag::kNumber, RootIndex::knumber_string);
4044  case NodeType::kString:
4045  return GetResult(TypeOfLiteralFlag::kString, RootIndex::kstring_string);
4046  case NodeType::kSymbol:
4047  return GetResult(TypeOfLiteralFlag::kSymbol, RootIndex::ksymbol_string);
4048  case NodeType::kCallable:
4049  return Select(
4050  [&](auto& builder) {
4051  return BuildBranchIfUndetectable(builder, value);
4052  },
4053  [&] {
4054  return GetResult(TypeOfLiteralFlag::kUndefined,
4055  RootIndex::kundefined_string);
4056  },
4057  [&] {
4058  return GetResult(TypeOfLiteralFlag::kFunction,
4059  RootIndex::kfunction_string);
4060  });
4061  case NodeType::kJSArray:
4062  // TODO(victorgomes): Track JSReceiver, non-callable types in Maglev.
4063  return GetResult(TypeOfLiteralFlag::kObject, RootIndex::kobject_string);
4064  default:
4065  break;
4066  }
4067 
4068  if (IsNullValue(value)) {
4069  return GetResult(TypeOfLiteralFlag::kObject, RootIndex::kobject_string);
4070  }
4071  if (IsUndefinedValue(value)) {
4072  return GetResult(TypeOfLiteralFlag::kUndefined,
4073  RootIndex::kundefined_string);
4074  }
4075 
4076  return {};
4077 }
NodeType CheckTypes(ValueNode *node, std::initializer_list< NodeType > types)
BranchResult BuildBranchIfUndetectable(BranchBuilder &builder, ValueNode *value)

References BuildBranchIfUndetectable(), CheckTypes(), v8::internal::torque::ls::kFunction, Select(), and v8::internal::value.

Referenced by TryReduceTypeOf().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TryReuseKnownPropertyLoad()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TryReuseKnownPropertyLoad ( ValueNode lookup_start_object,
compiler::NameRef  name 
)
private

Definition at line 7430 of file maglev-graph-builder.cc.

7431  {
7432  if (MaybeReduceResult result = TryFindLoadedProperty(
7433  known_node_aspects().loaded_properties, lookup_start_object, name);
7434  result.IsDone()) {
7435  if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7436  std::cout << " * Reusing non-constant loaded property "
7437  << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7438  << PrintNode(graph_labeller(), result.value()) << std::endl;
7439  }
7440  return result;
7441  }
7442  if (MaybeReduceResult result =
7443  TryFindLoadedProperty(known_node_aspects().loaded_constant_properties,
7444  lookup_start_object, name);
7445  result.IsDone()) {
7446  if (v8_flags.trace_maglev_graph_building && result.IsDoneWithValue()) {
7447  std::cout << " * Reusing constant loaded property "
7448  << PrintNodeLabel(graph_labeller(), result.value()) << ": "
7449  << PrintNode(graph_labeller(), result.value()) << std::endl;
7450  }
7451  return result;
7452  }
7453  return {};
7454 }

References v8::internal::name, v8::internal::compiler::anonymous_namespace{node.cc}::PrintNode(), v8::base::internal::result, v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::TryFindLoadedProperty(), and v8::internal::v8_flags.

Referenced by BuildLoadJSArrayLength().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TrySpecializeLoadContextSlot()

ValueNode * v8::internal::maglev::MaglevGraphBuilder::TrySpecializeLoadContextSlot ( ValueNode context,
int  index 
)
private

Definition at line 3513 of file maglev-graph-builder.cc.

3514  {
3515  if (!context_node->Is<Constant>()) return {};
3516  compiler::ContextRef context =
3517  context_node->Cast<Constant>()->ref().AsContext();
3518  auto maybe_value = context.get(broker(), index);
3519  if (!maybe_value || maybe_value->IsTheHole() ||
3520  maybe_value->IsUndefinedContextCell()) {
3521  return {};
3522  }
3523  int offset = Context::OffsetOfElementAt(index);
3524  if (!maybe_value->IsContextCell()) {
3525  // No need to check for context cells anymore.
3526  return BuildLoadTaggedField<LoadTaggedFieldForContextSlotNoCells>(
3527  context_node, offset);
3528  }
3529  compiler::ContextCellRef slot_ref = maybe_value->AsContextCell();
3530  ContextCell::State state = slot_ref.state();
3531  switch (state) {
3532  case ContextCell::kConst: {
3533  auto constant = slot_ref.tagged_value(broker());
3534  if (!constant.has_value()) {
3535  return BuildLoadTaggedField<LoadTaggedFieldForContextSlotNoCells>(
3536  context_node, offset);
3537  }
3538  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3539  return GetConstant(*constant);
3540  }
3541  case ContextCell::kSmi: {
3542  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3543  ValueNode* value = BuildLoadTaggedField(
3544  GetConstant(slot_ref), offsetof(ContextCell, tagged_value_));
3546  return value;
3547  }
3548  case ContextCell::kInt32:
3549  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3550  return AddNewNode<LoadInt32>(
3551  {GetConstant(slot_ref)},
3552  static_cast<int>(offsetof(ContextCell, double_value_)));
3553  case ContextCell::kFloat64:
3554  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3555  return AddNewNode<LoadFloat64>(
3556  {GetConstant(slot_ref)},
3557  static_cast<int>(offsetof(ContextCell, double_value_)));
3559  return BuildLoadTaggedField<LoadTaggedFieldForContextSlotNoCells>(
3560  context_node, offset);
3561  }
3562  UNREACHABLE();
3563 }
bool DependOnContextCell(ContextRef script_context, size_t index, ContextCell::State state, JSHeapBroker *broker)

References broker(), BuildLoadTaggedField(), v8::internal::maglev::NodeBase::Cast(), v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnContextCell(), EnsureType(), v8::internal::compiler::ContextRef::get(), GetConstant(), v8::internal::index, v8::internal::maglev::NodeBase::Is(), v8::internal::ContextCell::kConst, v8::internal::ContextCell::kDetached, v8::internal::ContextCell::kFloat64, v8::internal::ContextCell::kInt32, v8::internal::compiler::kSmi, v8::internal::ContextCell::kSmi, v8::internal::Context::OffsetOfElementAt(), v8::internal::compiler::ContextCellRef::state(), v8::internal::compiler::ContextCellRef::tagged_value(), v8::internal::UNREACHABLE(), and v8::internal::value.

Referenced by LoadAndCacheContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TrySpecializeLoadContextSlotToFunctionContext()

bool v8::internal::maglev::MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext ( ValueNode context,
int  slot_index,
ContextSlotMutability  slot_mutability 
)
private

Definition at line 3475 of file maglev-graph-builder.cc.

3476  {
3478 
3479  if (slot_mutability == kMutable) return false;
3480 
3481  auto constant = TryGetConstant(context);
3482  if (!constant) return false;
3483 
3484  compiler::ContextRef context_ref = constant.value().AsContext();
3485 
3486  compiler::OptionalObjectRef maybe_slot_value =
3487  context_ref.get(broker(), slot_index);
3488  if (!maybe_slot_value.has_value()) return false;
3489 
3490  compiler::ObjectRef slot_value = maybe_slot_value.value();
3491  if (slot_value.IsHeapObject()) {
3492  // Even though the context slot is immutable, the context might have escaped
3493  // before the function to which it belongs has initialized the slot. We
3494  // must be conservative and check if the value in the slot is currently the
3495  // hole or undefined. Only if it is neither of these, can we be sure that it
3496  // won't change anymore.
3497  //
3498  // See also: JSContextSpecialization::ReduceJSLoadContext.
3499  compiler::OddballType oddball_type =
3500  slot_value.AsHeapObject().map(broker()).oddball_type(broker());
3501  if (oddball_type == compiler::OddballType::kUndefined ||
3502  slot_value.IsTheHole()) {
3503  return false;
3504  }
3505  }
3506 
3507  // Fold the load of the immutable slot.
3508 
3509  SetAccumulator(GetConstant(slot_value));
3510  return true;
3511 }

References broker(), compilation_unit_, v8::internal::DCHECK(), v8::internal::compiler::ContextRef::get(), GetConstant(), v8::internal::maglev::MaglevCompilationUnit::info(), kMutable, v8::internal::compiler::kUndefined, SetAccumulator(), v8::internal::maglev::MaglevCompilationInfo::specialize_to_function_context(), and TryGetConstant().

Referenced by BuildLoadContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ TrySpecializeStoreContextSlot()

MaybeReduceResult v8::internal::maglev::MaglevGraphBuilder::TrySpecializeStoreContextSlot ( ValueNode context,
int  index,
ValueNode value,
Node **  store 
)
private

Definition at line 3617 of file maglev-graph-builder.cc.

3618  {
3620  DCHECK(v8_flags.script_context_cells || v8_flags.function_context_cells);
3621  if (!context->Is<Constant>()) {
3622  *store =
3623  AddNewNode<StoreContextSlotWithWriteBarrier>({context, value}, index);
3624  return ReduceResult::Done();
3625  }
3626 
3627  if (IsEmptyNodeType(GetType(value))) {
3628  return EmitUnconditionalDeopt(DeoptimizeReason::kWrongValue);
3629  }
3630 
3631  compiler::ContextRef context_ref =
3632  context->Cast<Constant>()->ref().AsContext();
3633  auto maybe_value = context_ref.get(broker(), index);
3634  if (!maybe_value || maybe_value->IsTheHole() ||
3635  maybe_value->IsUndefinedContextCell()) {
3636  *store =
3637  AddNewNode<StoreContextSlotWithWriteBarrier>({context, value}, index);
3638  return ReduceResult::Done();
3639  }
3640 
3641  int offset = Context::OffsetOfElementAt(index);
3642  if (!maybe_value->IsContextCell()) {
3643  *store = BuildStoreTaggedField(context, value, offset,
3645  return ReduceResult::Done();
3646  }
3647 
3648  compiler::ContextCellRef slot_ref = maybe_value->AsContextCell();
3649  ContextCell::State state = slot_ref.state();
3650  switch (state) {
3651  case ContextCell::kConst: {
3652  auto constant = slot_ref.tagged_value(broker());
3653  if (!constant.has_value() ||
3654  (constant->IsString() && !constant->IsInternalizedString())) {
3655  *store = AddNewNode<StoreContextSlotWithWriteBarrier>({context, value},
3656  index);
3657  return ReduceResult::Done();
3658  }
3659  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3661  value, *constant, DeoptimizeReason::kStoreToConstant);
3662  }
3663  case ContextCell::kSmi:
3665  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3667  GetConstant(slot_ref), value, offsetof(ContextCell, tagged_value_),
3669  return ReduceResult::Done();
3670  case ContextCell::kInt32:
3671  EnsureInt32(value, true);
3672  *store = AddNewNode<StoreInt32>(
3673  {GetConstant(slot_ref), value},
3674  static_cast<int>(offsetof(ContextCell, double_value_)));
3675  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3676  return ReduceResult::Done();
3677  case ContextCell::kFloat64:
3679  *store = AddNewNode<StoreFloat64>(
3680  {GetConstant(slot_ref), value},
3681  static_cast<int>(offsetof(ContextCell, double_value_)));
3682  broker()->dependencies()->DependOnContextCell(slot_ref, state);
3683  return ReduceResult::Done();
3685  *store = BuildStoreTaggedField(context, value, offset,
3687  return ReduceResult::Done();
3688  }
3689  UNREACHABLE();
3690 }

References broker(), BuildCheckNumber(), BuildCheckNumericalValueOrByReference(), BuildCheckSmi(), BuildStoreTaggedField(), BuildStoreTaggedFieldNoWriteBarrier(), v8::internal::maglev::NodeBase::Cast(), v8::internal::DCHECK(), DCHECK_NOT_NULL, v8::internal::compiler::JSHeapBroker::dependencies(), v8::internal::compiler::CompilationDependencies::DependOnContextCell(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), EnsureInt32(), v8::internal::compiler::ContextRef::get(), GetConstant(), GetType(), v8::internal::index, v8::internal::maglev::NodeBase::Is(), v8::internal::maglev::IsEmptyNodeType(), v8::internal::ContextCell::kConst, v8::internal::maglev::kDefault, v8::internal::ContextCell::kDetached, v8::internal::ContextCell::kFloat64, v8::internal::ContextCell::kInt32, v8::internal::ContextCell::kSmi, v8::internal::Context::OffsetOfElementAt(), RETURN_IF_ABORT, v8::internal::compiler::ContextCellRef::state(), store(), v8::internal::compiler::ContextCellRef::tagged_value(), v8::internal::UNREACHABLE(), v8::internal::v8_flags, and v8::internal::value.

Referenced by StoreAndCacheContextSlot().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ UpdatePredecessorCount()

void v8::internal::maglev::MaglevGraphBuilder::UpdatePredecessorCount ( uint32_t  offset,
int  diff 
)
inlineprivate

Definition at line 3203 of file maglev-graph-builder.h.

3203  {
3204  DCHECK_LE(offset, bytecode().length());
3205  DCHECK_LE(0, static_cast<int64_t>(predecessor_count_[offset]) + diff);
3206  DCHECK_IMPLIES(merge_states_[offset],
3207  merge_states_[offset]->predecessor_count() ==
3208  predecessor_count_[offset] + diff);
3209  predecessor_count_[offset] += diff;
3210  }

References DCHECK_IMPLIES, DCHECK_LE, and v8::internal::length.

Referenced by KillPeeledLoopTargets().

+ Here is the caller graph for this function:

◆ UpdateSourceAndBytecodePosition()

void v8::internal::maglev::MaglevGraphBuilder::UpdateSourceAndBytecodePosition ( int  offset)
inlineprivate

Definition at line 809 of file maglev-graph-builder.h.

809  {
810  if (source_position_iterator_.done()) return;
811  if (source_position_iterator_.code_offset() == offset) {
812  current_source_position_ = SourcePosition(
814  inlining_id_);
816  } else {
818  }
819  }

References v8::internal::SourcePositionTableIterator::Advance(), v8::internal::SourcePositionTableIterator::code_offset(), current_source_position_, DCHECK_GT, v8::internal::SourcePositionTableIterator::done(), inlining_id_, v8::internal::SourcePosition::ScriptOffset(), v8::internal::SourcePositionTableIterator::source_position(), and source_position_iterator_.

Referenced by VisitSingleBytecode().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ VisitBinaryOperation()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::VisitBinaryOperation
private

Definition at line 2938 of file maglev-graph-builder.cc.

2938  {
2939  FeedbackNexus nexus = FeedbackNexusForOperand(1);
2940  BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
2941  switch (feedback_hint) {
2943  return EmitUnconditionalDeopt(
2944  DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
2950  auto [allowed_input_type, conversion_type] =
2951  BinopHintToNodeTypeAndConversionType(feedback_hint);
2952  if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
2953  return BuildTruncatingInt32BinaryOperationNodeForToNumber<kOperation>(
2954  allowed_input_type, conversion_type);
2955  } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
2956  if constexpr (kOperation == Operation::kExponentiate) {
2957  // Exponentiate never updates the feedback to be a Smi.
2958  UNREACHABLE();
2959  } else {
2960  return BuildInt32BinaryOperationNode<kOperation>();
2961  }
2962  } else {
2963  return BuildFloat64BinaryOperationNodeForToNumber<kOperation>(
2964  allowed_input_type, conversion_type);
2965  }
2966  break;
2967  }
2969  if constexpr (kOperation == Operation::kAdd) {
2970  ValueNode* left = LoadRegister(0);
2971  ValueNode* right = GetAccumulator();
2972  return BuildStringConcat(left, right);
2973  }
2974  break;
2976  if constexpr (kOperation == Operation::kAdd) {
2977  if (broker()
2978  ->dependencies()
2979  ->DependOnStringWrapperToPrimitiveProtector()) {
2980  ValueNode* left = LoadRegister(0);
2981  ValueNode* right = GetAccumulator();
2984  left = BuildUnwrapStringWrapper(left);
2985  right = BuildUnwrapStringWrapper(right);
2986  return BuildStringConcat(left, right);
2987  }
2988  }
2989  [[fallthrough]];
2993  // Fallback to generic node.
2994  break;
2995  }
2996  BuildGenericBinaryOperationNode<kOperation>();
2997  return ReduceResult::Done();
2998 }
const FeedbackNexus FeedbackNexusForOperand(int slot_operand_index) const
ReduceResult BuildCheckStringOrStringWrapper(ValueNode *object)
ValueNode * BuildUnwrapStringWrapper(ValueNode *input)
ReduceResult BuildStringConcat(ValueNode *left, ValueNode *right)
std::tuple< NodeType, TaggedToFloat64ConversionType > BinopHintToNodeTypeAndConversionType(BinaryOperationHint hint)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::BinopHintToNodeTypeAndConversionType(), broker(), BuildCheckStringOrStringWrapper(), BuildStringConcat(), BuildUnwrapStringWrapper(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), FeedbackNexusForOperand(), GetAccumulator(), v8::internal::FeedbackNexus::GetBinaryOperationFeedback(), v8::internal::kAdditiveSafeInteger, v8::internal::kAny, v8::internal::kBigInt, v8::internal::kBigInt64, v8::internal::kNone, v8::internal::kNumber, v8::internal::kNumberOrOddball, v8::kOperation, v8::internal::kSignedSmall, v8::internal::kSignedSmallInputs, v8::internal::kString, v8::internal::kStringOrStringWrapper, LoadRegister(), RETURN_IF_ABORT, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ VisitBinarySmiOperation()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::VisitBinarySmiOperation
private

Definition at line 3001 of file maglev-graph-builder.cc.

3001  {
3002  FeedbackNexus nexus = FeedbackNexusForOperand(1);
3003  BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
3004  switch (feedback_hint) {
3006  return EmitUnconditionalDeopt(
3007  DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
3013  const auto [allowed_input_type, conversion_type] =
3014  BinopHintToNodeTypeAndConversionType(feedback_hint);
3015  if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
3017  kOperation>(allowed_input_type, conversion_type);
3018  } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
3019  if constexpr (kOperation == Operation::kExponentiate) {
3020  // Exponentiate never updates the feedback to be a Smi.
3021  UNREACHABLE();
3022  } else {
3023  return BuildInt32BinarySmiOperationNode<kOperation>();
3024  }
3025  } else {
3026  return BuildFloat64BinarySmiOperationNodeForToNumber<kOperation>(
3027  allowed_input_type, conversion_type);
3028  }
3029  break;
3030  }
3036  // Fallback to generic node.
3037  break;
3038  }
3039  BuildGenericBinarySmiOperationNode<kOperation>();
3040  return ReduceResult::Done();
3041 }
ReduceResult BuildTruncatingInt32BinarySmiOperationNodeForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::BinopHintToNodeTypeAndConversionType(), BuildTruncatingInt32BinarySmiOperationNodeForToNumber(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), FeedbackNexusForOperand(), v8::internal::FeedbackNexus::GetBinaryOperationFeedback(), v8::internal::kAdditiveSafeInteger, v8::internal::kAny, v8::internal::kBigInt, v8::internal::kBigInt64, v8::internal::kNone, v8::internal::kNumber, v8::internal::kNumberOrOddball, v8::kOperation, v8::internal::kSignedSmall, v8::internal::kSignedSmallInputs, v8::internal::kString, v8::internal::kStringOrStringWrapper, and v8::internal::UNREACHABLE().

+ Here is the call graph for this function:

◆ VisitCompareOperation()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::VisitCompareOperation
private

Definition at line 3182 of file maglev-graph-builder.cc.

3182  {
3183  if (TryReduceCompareEqualAgainstConstant<kOperation>())
3184  return ReduceResult::Done();
3185 
3186  // Compare opcodes are not always commutative. We sort the ones which are for
3187  // better CSE coverage.
3188  auto SortCommute = [](ValueNode*& left, ValueNode*& right) {
3189  if (!v8_flags.maglev_cse) return;
3190  if (kOperation != Operation::kEqual &&
3191  kOperation != Operation::kStrictEqual) {
3192  return;
3193  }
3194  if (left > right) {
3195  std::swap(left, right);
3196  }
3197  };
3198 
3199  auto TryConstantFoldInt32 = [&](ValueNode* left, ValueNode* right) {
3200  if (left->Is<Int32Constant>() && right->Is<Int32Constant>()) {
3201  int left_value = left->Cast<Int32Constant>()->value();
3202  int right_value = right->Cast<Int32Constant>()->value();
3204  OperationValue<kOperation>(left_value, right_value)));
3205  return true;
3206  }
3207  return false;
3208  };
3209 
3210  auto TryConstantFoldEqual = [&](ValueNode* left, ValueNode* right) {
3211  if (left == right) {
3213  GetBooleanConstant(kOperation == Operation::kEqual ||
3214  kOperation == Operation::kStrictEqual ||
3215  kOperation == Operation::kLessThanOrEqual ||
3216  kOperation == Operation::kGreaterThanOrEqual));
3217  return true;
3218  }
3219  return false;
3220  };
3221 
3222  auto MaybeOddballs = [&]() {
3223  auto MaybeOddball = [&](ValueNode* value) {
3224  ValueRepresentation rep = value->value_representation();
3225  switch (rep) {
3229  return false;
3230  default:
3231  break;
3232  }
3233  return !CheckType(value, NodeType::kNumber);
3234  };
3235  return MaybeOddball(LoadRegister(0)) || MaybeOddball(GetAccumulator());
3236  };
3237 
3238  // TODO(victorgomes): Investigate if we can just propagate one of the types
3239  // instead.
3240  auto GetConversionType = [](CompareOperationHint hint) {
3241  switch (hint) {
3243  return std::make_pair(NodeType::kNumber,
3246  return std::make_pair(NodeType::kNumberOrBoolean,
3249  return std::make_pair(NodeType::kNumberOrOddball,
3251  default:
3252  UNREACHABLE();
3253  }
3254  };
3255 
3256  FeedbackNexus nexus = FeedbackNexusForOperand(1);
3257  switch (nexus.GetCompareOperationFeedback()) {
3259  return EmitUnconditionalDeopt(
3260  DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation);
3261 
3263  // TODO(victorgomes): Add a smart equality operator, that compares for
3264  // constants in different representations.
3265  ValueNode* left = GetInt32(LoadRegister(0));
3266  ValueNode* right = GetInt32(GetAccumulator());
3267  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3268  if (TryConstantFoldInt32(left, right)) return ReduceResult::Done();
3269  SortCommute(left, right);
3270  SetAccumulator(AddNewNode<Int32Compare>({left, right}, kOperation));
3271  return ReduceResult::Done();
3272  }
3274  // Equality and strict equality don't perform ToNumber conversions on
3275  // Oddballs.
3276  if ((kOperation == Operation::kEqual ||
3277  kOperation == Operation::kStrictEqual) &&
3278  MaybeOddballs()) {
3279  break;
3280  }
3281  [[fallthrough]];
3283  if (kOperation == Operation::kStrictEqual && MaybeOddballs()) {
3284  break;
3285  }
3286  [[fallthrough]];
3288  ValueNode* left = LoadRegister(0);
3289  ValueNode* right = GetAccumulator();
3290  if (left->value_representation() == ValueRepresentation::kInt32 &&
3291  right->value_representation() == ValueRepresentation::kInt32) {
3292  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3293  if (TryConstantFoldInt32(left, right)) return ReduceResult::Done();
3294  SortCommute(left, right);
3295  SetAccumulator(AddNewNode<Int32Compare>({left, right}, kOperation));
3296  return ReduceResult::Done();
3297  }
3298  auto [allowed_input_type, conversion_type] =
3299  GetConversionType(nexus.GetCompareOperationFeedback());
3300  left = GetFloat64ForToNumber(left, allowed_input_type, conversion_type);
3301  right = GetFloat64ForToNumber(right, allowed_input_type, conversion_type);
3302  if (left->Is<Float64Constant>() && right->Is<Float64Constant>()) {
3303  double left_value = left->Cast<Float64Constant>()->value().get_scalar();
3304  double right_value =
3305  right->Cast<Float64Constant>()->value().get_scalar();
3307  OperationValue<kOperation>(left_value, right_value)));
3308  return ReduceResult::Done();
3309  }
3310  SortCommute(left, right);
3311  SetAccumulator(AddNewNode<Float64Compare>({left, right}, kOperation));
3312  return ReduceResult::Done();
3313  }
3315  DCHECK(kOperation == Operation::kEqual ||
3316  kOperation == Operation::kStrictEqual);
3317  ValueNode *left, *right;
3320  SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
3321  return ReduceResult::Done();
3322  }
3324  right =
3326  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3327  SetAccumulator(BuildTaggedEqual(left, right));
3328  return ReduceResult::Done();
3329  }
3331  DCHECK(kOperation == Operation::kEqual ||
3332  kOperation == Operation::kStrictEqual);
3333 
3334  ValueNode* left = LoadRegister(0);
3335  ValueNode* right = GetAccumulator();
3338  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3339  SetAccumulator(BuildTaggedEqual(left, right));
3340  return ReduceResult::Done();
3341  }
3343  ValueNode* left = LoadRegister(0);
3344  ValueNode* right = GetAccumulator();
3347 
3348  ValueNode* result;
3349  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3350  ValueNode* tagged_left = GetTaggedValue(left);
3351  ValueNode* tagged_right = GetTaggedValue(right);
3352  switch (kOperation) {
3353  case Operation::kEqual:
3354  case Operation::kStrictEqual: {
3355  result = AddNewNode<StringEqual>({tagged_left, tagged_right},
3357  break;
3358  }
3359  case Operation::kLessThan:
3360  result = BuildCallBuiltin<Builtin::kStringLessThan>(
3361  {tagged_left, tagged_right});
3362  break;
3363  case Operation::kLessThanOrEqual:
3364  result = BuildCallBuiltin<Builtin::kStringLessThanOrEqual>(
3365  {tagged_left, tagged_right});
3366  break;
3368  result = BuildCallBuiltin<Builtin::kStringGreaterThan>(
3369  {tagged_left, tagged_right});
3370  break;
3371  case Operation::kGreaterThanOrEqual:
3372  result = BuildCallBuiltin<Builtin::kStringGreaterThanOrEqual>(
3373  {tagged_left, tagged_right});
3374  break;
3375  }
3376 
3378  return ReduceResult::Done();
3379  }
3381  if (kOperation == Operation::kStrictEqual) {
3382  ValueNode* left = LoadRegister(0);
3383  ValueNode* right = GetAccumulator();
3386 
3387  if (TryConstantFoldEqual(left, right)) return ReduceResult::Done();
3388 
3389  // TODO(marja): If one of the sides is constant, we can generate better
3390  // code (e.g., don't need to check its type at run time).
3391 
3392  ValueNode* tagged_left = GetTaggedValue(left);
3393  ValueNode* tagged_right = GetTaggedValue(right);
3394  ValueNode* result = AddNewNode<StringEqual>(
3395  {tagged_left, tagged_right}, StringEqualInputs::kStringsOrOddballs);
3397  return ReduceResult::Done();
3398  }
3399  break;
3400  }
3404  break;
3406  if (kOperation == Operation::kEqual) {
3407  break;
3408  }
3409  DCHECK_EQ(kOperation, Operation::kStrictEqual);
3410 
3411  ValueNode* left = LoadRegister(0);
3412  ValueNode* right = GetAccumulator();
3415  SetAccumulator(BuildTaggedEqual(left, right));
3416  return ReduceResult::Done();
3417  }
3419  DCHECK(kOperation == Operation::kEqual ||
3420  kOperation == Operation::kStrictEqual);
3421 
3422  ValueNode* left = LoadRegister(0);
3423  ValueNode* right = GetAccumulator();
3426  SetAccumulator(BuildTaggedEqual(left, right));
3427  return ReduceResult::Done();
3428  }
3429  }
3430 
3431  BuildGenericBinaryOperationNode<kOperation>();
3432  return ReduceResult::Done();
3433 }
ValueNode * GetInternalizedString(interpreter::Register reg)
ReduceResult BuildCheckJSReceiver(ValueNode *object)
ReduceResult BuildCheckStringOrOddball(ValueNode *object)
ReduceResult BuildCheckSymbol(ValueNode *object)
ReduceResult BuildCheckJSReceiverOrNullOrUndefined(ValueNode *object)

References BuildCheckJSReceiver(), BuildCheckJSReceiverOrNullOrUndefined(), BuildCheckString(), BuildCheckStringOrOddball(), BuildCheckSymbol(), BuildTaggedEqual(), v8::internal::maglev::NodeBase::Cast(), CheckType(), v8::internal::DCHECK(), DCHECK_EQ, v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), FeedbackNexusForOperand(), GetAccumulator(), GetBooleanConstant(), v8::internal::FeedbackNexus::GetCompareOperationFeedback(), GetFloat64ForToNumber(), GetInt32(), GetInternalizedString(), v8::internal::interpreter::BytecodeArrayIterator::GetRegisterOperand(), GetRootConstant(), GetTaggedValue(), v8::internal::maglev::NodeBase::Is(), IsRegisterEqualToAccumulator(), iterator_, v8::internal::kAny, v8::internal::kBigInt, v8::internal::kBigInt64, v8::internal::kEqual, v8::internal::maglev::kFloat64, v8::internal::kGreaterThan, v8::internal::maglev::kInt32, v8::internal::kInternalizedString, v8::internal::kLessThan, v8::internal::kNone, v8::internal::kNumber, v8::internal::maglev::kNumberOrBoolean, v8::internal::kNumberOrBoolean, v8::internal::maglev::kNumberOrOddball, v8::internal::kNumberOrOddball, v8::internal::maglev::kOnlyNumber, v8::internal::maglev::kOnlyStrings, v8::kOperation, v8::internal::kReceiver, v8::internal::kReceiverOrNullOrUndefined, v8::internal::kSignedSmall, v8::internal::kString, v8::internal::kStringOrOddball, v8::internal::maglev::kStringsOrOddballs, v8::internal::kSymbol, v8::internal::maglev::kUint32, LoadRegister(), v8::base::internal::result, RETURN_IF_ABORT, SetAccumulator(), v8::internal::UNREACHABLE(), v8::internal::v8_flags, v8::internal::value, v8::internal::maglev::ValueNode::value_representation(), and v8::internal::interpreter::Register::virtual_accumulator().

+ Here is the call graph for this function:

◆ VisitSingleBytecode()

void v8::internal::maglev::MaglevGraphBuilder::VisitSingleBytecode ( )
inlineprivate

Definition at line 827 of file maglev-graph-builder.h.

827  {
828  if (v8_flags.trace_maglev_graph_building) {
829  std::cout << std::setw(4) << iterator_.current_offset() << " : ";
832  std::cout << std::endl;
833  }
834 
835  int offset = iterator_.current_offset();
837 
838  MergePointInterpreterFrameState* merge_state = merge_states_[offset];
839  if (V8_UNLIKELY(merge_state != nullptr)) {
840  bool preserve_known_node_aspects = in_optimistic_peeling_iteration() &&
842  if (merge_state->is_resumable_loop()) {
844  }
845  if (current_block_ != nullptr) {
846  DCHECK(!preserve_known_node_aspects);
847  // TODO(leszeks): Re-evaluate this DCHECK, we might hit it if the only
848  // bytecodes in this basic block were only register juggling.
849  // DCHECK(!node_buffer().empty());
850  BasicBlock* predecessor;
851  if (merge_state->is_loop() && !merge_state->is_resumable_loop() &&
853  predecessor =
854  FinishBlock<CheckpointedJump>({}, &jump_targets_[offset]);
855  } else {
856  predecessor = FinishBlock<Jump>({}, &jump_targets_[offset]);
857  }
858  merge_state->Merge(this, *compilation_unit_, current_interpreter_frame_,
859  predecessor);
860  }
861  if (v8_flags.trace_maglev_graph_building) {
862  auto detail = merge_state->is_exception_handler() ? "exception handler"
863  : merge_state->is_loop() ? "loop header"
864  : "merge";
865  std::cout << "== New block (" << detail << " @" << merge_state
866  << ") at "
868  << "==" << std::endl;
870  }
871 
872  if (V8_UNLIKELY(merge_state->is_exception_handler())) {
873  CHECK_EQ(predecessor_count(offset), 0);
874  // If we have no reference to this block, then the exception handler is
875  // dead.
876  if (!jump_targets_[offset].has_ref() ||
877  !merge_state->exception_handler_was_used()) {
879  return;
880  }
882  } else if (merge_state->is_unmerged_unreachable_loop()) {
883  // We encountered a loop header that is only reachable by the JumpLoop
884  // back-edge, but the bytecode_analysis didn't notice upfront. This can
885  // e.g. be a loop that is entered on a dead fall-through.
886  static_assert(kLoopsMustBeEnteredThroughHeader);
888  return;
889  } else {
890  ProcessMergePoint(offset, preserve_known_node_aspects);
891  }
892 
893  if (is_loop_effect_tracking_enabled() && merge_state->is_loop()) {
894  BeginLoopEffects(offset);
895  }
896  // We pass nullptr for the `predecessor` argument of StartNewBlock because
897  // this block is guaranteed to have a merge_state_, and hence to not have
898  // a `predecessor_` field.
899  StartNewBlock(offset, /*predecessor*/ nullptr);
900  } else if (V8_UNLIKELY(current_block_ == nullptr)) {
901  // If we don't have a current block, the bytecode must be dead (because of
902  // some earlier deopt). Mark this bytecode dead too and return.
903  // TODO(leszeks): Merge these two conditions by marking dead states with
904  // a sentinel value.
905  if (predecessor_count(offset) == 1) {
906  CHECK_NULL(merge_state);
907  CHECK(bytecode_analysis().IsLoopHeader(offset));
908  } else {
909  CHECK_EQ(predecessor_count(offset), 0);
910  }
912  return;
913  }
914 
915  // Handle exceptions if we have a table.
916  if (bytecode().handler_table_size() > 0) {
917  // Pop all entries where offset >= end.
918  while (IsInsideTryBlock()) {
919  HandlerTableEntry& entry = catch_block_stack_.top();
920  if (offset < entry.end) break;
921  catch_block_stack_.pop();
922  }
923  // Push new entries from interpreter handler table where offset >= start
924  // && offset < end.
925  HandlerTable table(*bytecode().object());
926  while (next_handler_table_index_ < table.NumberOfRangeEntries()) {
927  int start = table.GetRangeStart(next_handler_table_index_);
928  if (offset < start) break;
929  int end = table.GetRangeEnd(next_handler_table_index_);
930  if (offset >= end) {
932  continue;
933  }
934  int handler = table.GetRangeHandler(next_handler_table_index_);
935  catch_block_stack_.push({end, handler});
936  DCHECK_NOT_NULL(merge_states_[handler]);
938  }
939  }
940 
942 #ifdef DEBUG
943  // Clear new nodes for the next VisitFoo
944  new_nodes_.clear();
945 #endif
946 
947  if (iterator_.current_bytecode() == interpreter::Bytecode::kJumpLoop &&
949  static_assert(kLoopsMustBeEnteredThroughHeader);
950  CHECK(EmitUnconditionalDeopt(DeoptimizeReason::kOSREarlyExit)
951  .IsDoneWithAbort());
953  return;
954  }
955 
956  switch (iterator_.current_bytecode()) {
957 #define BYTECODE_CASE(name, ...) \
958  case interpreter::Bytecode::k##name: { \
959  if (Visit##name().IsDoneWithAbort()) { \
960  MarkBytecodeDead(); \
961  } \
962  break; \
963  }
965 #undef BYTECODE_CASE
966  }
967  }
#define BYTECODE_CASE(name,...)

References BeginLoopEffects(), bytecode(), bytecode_analysis(), BYTECODE_CASE, BYTECODE_LIST, catch_block_stack_, CHECK, CHECK_EQ, CHECK_NULL, compilation_unit(), compilation_unit_, v8::internal::BitVector::Contains(), v8::internal::interpreter::BytecodeArrayIterator::current_address(), current_block_, v8::internal::interpreter::BytecodeArrayIterator::current_bytecode(), current_for_in_state, current_interpreter_frame_, v8::internal::interpreter::BytecodeArrayIterator::current_offset(), v8::internal::DCHECK(), DCHECK_NOT_NULL, v8::internal::interpreter::BytecodeDecoder::Decode(), EmitUnconditionalDeopt(), v8::internal::compiler::end(), v8::internal::maglev::MaglevGraphBuilder::HandlerTableEntry::end, entrypoint_, v8::internal::maglev::MaglevGraphBuilder::ForInState::enum_cache_indices, v8::internal::maglev::MergePointInterpreterFrameState::exception_handler_was_used(), v8::internal::interpreter::BytecodeArrayIterator::GetJumpTargetOffset(), v8::internal::HandlerTable::GetRangeEnd(), v8::internal::HandlerTable::GetRangeHandler(), v8::internal::HandlerTable::GetRangeStart(), in_optimistic_peeling_iteration(), v8::internal::maglev::MergePointInterpreterFrameState::is_exception_handler(), v8::internal::maglev::MergePointInterpreterFrameState::is_loop(), is_loop_effect_tracking_enabled(), v8::internal::maglev::MergePointInterpreterFrameState::is_resumable_loop(), v8::internal::maglev::MergePointInterpreterFrameState::is_unmerged_unreachable_loop(), IsInsideTryBlock(), iterator_, jump_targets_, kLoopsMustBeEnteredThroughHeader, loop_headers_to_peel_, MarkBytecodeDead(), v8::internal::maglev::MergePointInterpreterFrameState::Merge(), merge_states_, need_checkpointed_loop_entry(), next_handler_table_index_, v8::internal::HandlerTable::NumberOfRangeEntries(), v8::internal::compiler::SharedFunctionInfoRef::object(), predecessor_count(), PrintVirtualObjects(), ProcessMergePoint(), ProcessMergePointAtExceptionHandlerStart(), 
v8::internal::maglev::MaglevCompilationUnit::shared_function_info(), StartNewBlock(), UpdateSourceAndBytecodePosition(), v8::internal::v8_flags, and V8_UNLIKELY.

Referenced by BuildBody().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

◆ VisitUnaryOperation()

template<Operation kOperation>
ReduceResult v8::internal::maglev::MaglevGraphBuilder::VisitUnaryOperation
private

Definition at line 2662 of file maglev-graph-builder.cc.

2662  {
2663  FeedbackNexus nexus = FeedbackNexusForOperand(0);
2664  BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
2665  switch (feedback_hint) {
2667  return EmitUnconditionalDeopt(
2668  DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation);
2674  auto [allowed_input_type, conversion_type] =
2675  BinopHintToNodeTypeAndConversionType(feedback_hint);
2676  if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
2677  static_assert(kOperation == Operation::kBitwiseNot);
2678  return BuildTruncatingInt32BitwiseNotForToNumber(allowed_input_type,
2679  conversion_type);
2680  } else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
2681  return BuildInt32UnaryOperationNode<kOperation>();
2682  }
2683  return BuildFloat64UnaryOperationNodeForToNumber<kOperation>(
2684  allowed_input_type, conversion_type);
2685  break;
2686  }
2692  // Fallback to generic node.
2693  break;
2694  }
2695  BuildGenericUnaryOperationNode<kOperation>();
2696  return ReduceResult::Done();
2697 }
ReduceResult BuildTruncatingInt32BitwiseNotForToNumber(NodeType allowed_input_type, TaggedToFloat64ConversionType conversion_type)

References v8::internal::maglev::anonymous_namespace{maglev-graph-builder.cc}::BinopHintToNodeTypeAndConversionType(), BuildTruncatingInt32BitwiseNotForToNumber(), v8::internal::maglev::ReduceResult::Done(), EmitUnconditionalDeopt(), FeedbackNexusForOperand(), v8::internal::FeedbackNexus::GetBinaryOperationFeedback(), v8::internal::kAdditiveSafeInteger, v8::internal::kAny, v8::internal::kBigInt, v8::internal::kBigInt64, v8::internal::kNone, v8::internal::kNumber, v8::internal::kNumberOrOddball, v8::kOperation, v8::internal::kSignedSmall, v8::internal::kSignedSmallInputs, v8::internal::kString, and v8::internal::kStringOrStringWrapper.

+ Here is the call graph for this function:

◆ zone()

Zone* v8::internal::maglev::MaglevGraphBuilder::zone ( ) const
inline

Definition at line 357 of file maglev-graph-builder.h.

357 { return compilation_unit_->zone(); }

References compilation_unit_, and v8::internal::maglev::MaglevCompilationUnit::zone().

Referenced by AddInlinedArgumentsToDeoptFrame(), AddNewNode(), AddNewNodeOrGetEquivalent(), AttachDeoptCheckpoint(), AttachEagerDeoptInfo(), AttachLazyDeoptInfo(), Build(), BuildCheckMaps(), BuildCompareMaps(), BuildInitializeStore(), CreateEdgeSplitBlock(), CreateNewConstantNode(), v8::internal::maglev::anonymous_namespace{maglev-interpreter-frame-state.cc}::FromFloat64ToTagged(), v8::internal::maglev::anonymous_namespace{maglev-interpreter-frame-state.cc}::FromHoleyFloat64ToTagged(), v8::internal::maglev::anonymous_namespace{maglev-interpreter-frame-state.cc}::FromInt32ToTagged(), v8::internal::maglev::anonymous_namespace{maglev-interpreter-frame-state.cc}::FromIntPtrToTagged(), v8::internal::maglev::anonymous_namespace{maglev-interpreter-frame-state.cc}::FromUint32ToTagged(), GetDeoptFrameForEagerCall(), GetDeoptFrameForEntryStackCheck(), GetDeoptFrameForLazyDeoptHelper(), GetLatestCheckpointedFrame(), v8::internal::maglev::MergePointInterpreterFrameState::InitializeLoop(), v8::internal::maglev::MergePointInterpreterFrameState::Merge(), v8::internal::maglev::MergePointInterpreterFrameState::MergeLoop(), v8::internal::maglev::MergePointInterpreterFrameState::MergeThrow(), v8::internal::maglev::MergePointInterpreterFrameState::MergeValue(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObject(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObjects(), v8::internal::maglev::MergePointInterpreterFrameState::MergeVirtualObjectValue(), ProcessMergePoint(), StartPrologue(), TryBuildNamedAccess(), and v8::internal::maglev::MergePointInterpreterFrameState::TryMergeLoop().

+ Here is the call graph for this function:
+ Here is the caller graph for this function:

Member Data Documentation

◆ allow_loop_peeling_

bool v8::internal::maglev::MaglevGraphBuilder::allow_loop_peeling_
private

Definition at line 3224 of file maglev-graph-builder.h.

◆ any_peeled_loop_

bool v8::internal::maglev::MaglevGraphBuilder::any_peeled_loop_ = false
private

Definition at line 3223 of file maglev-graph-builder.h.

◆ broker_

compiler::JSHeapBroker* v8::internal::maglev::MaglevGraphBuilder::broker_ = compilation_unit_->broker()
private

Definition at line 3173 of file maglev-graph-builder.h.

Referenced by broker(), and TryReduceCompareEqualAgainstConstant().

◆ bytecode_analysis_

compiler::BytecodeAnalysis v8::internal::maglev::MaglevGraphBuilder::bytecode_analysis_
private

Definition at line 3176 of file maglev-graph-builder.h.

◆ caller_details_

◆ catch_block_stack_

ZoneStack<HandlerTableEntry> v8::internal::maglev::MaglevGraphBuilder::catch_block_stack_
private

◆ compilation_unit_

◆ current_allocation_block_

AllocationBlock* v8::internal::maglev::MaglevGraphBuilder::current_allocation_block_ = nullptr
private

◆ current_block_

◆ current_deopt_scope_

DeoptFrameScope* v8::internal::maglev::MaglevGraphBuilder::current_deopt_scope_ = nullptr
private

◆ current_for_in_state

ForInState v8::internal::maglev::MaglevGraphBuilder::current_for_in_state = ForInState()
private

Definition at line 3269 of file maglev-graph-builder.h.

Referenced by VisitSingleBytecode().

◆ current_interpreter_frame_

◆ current_source_position_

◆ current_speculation_feedback_

compiler::FeedbackSource v8::internal::maglev::MaglevGraphBuilder::current_speculation_feedback_
private

◆ current_speculation_mode_

SpeculationMode v8::internal::maglev::MaglevGraphBuilder::current_speculation_mode_
private

◆ decremented_predecessor_offsets_

ZoneVector<int> v8::internal::maglev::MaglevGraphBuilder::decremented_predecessor_offsets_
private

Definition at line 3250 of file maglev-graph-builder.h.

◆ entry_stack_check_frame_

std::optional<InterpretedDeoptFrame> v8::internal::maglev::MaglevGraphBuilder::entry_stack_check_frame_
private

Definition at line 3257 of file maglev-graph-builder.h.

Referenced by GetDeoptFrameForEntryStackCheck().

◆ entrypoint_

int v8::internal::maglev::MaglevGraphBuilder::entrypoint_
private

◆ graph_

◆ in_prologue_

bool v8::internal::maglev::MaglevGraphBuilder::in_prologue_ = true
private

Definition at line 3255 of file maglev-graph-builder.h.

Referenced by Build(), and GetLatestCheckpointedFrame().

◆ inlined_new_target_

ValueNode* v8::internal::maglev::MaglevGraphBuilder::inlined_new_target_ = nullptr
private

Definition at line 3281 of file maglev-graph-builder.h.

◆ inlining_id_

int v8::internal::maglev::MaglevGraphBuilder::inlining_id_ = SourcePosition::kNotInlined
private

Definition at line 3292 of file maglev-graph-builder.h.

Referenced by Build(), BuildBody(), and UpdateSourceAndBytecodePosition().

◆ is_turbolev_

bool v8::internal::maglev::MaglevGraphBuilder::is_turbolev_ = false
private

Definition at line 3283 of file maglev-graph-builder.h.

Referenced by is_turbolev().

◆ iterator_

◆ jump_targets_

BasicBlockRef* v8::internal::maglev::MaglevGraphBuilder::jump_targets_
private

◆ kLoopsMustBeEnteredThroughHeader

constexpr bool v8::internal::maglev::MaglevGraphBuilder::kLoopsMustBeEnteredThroughHeader = true
staticconstexprprivate

Definition at line 523 of file maglev-graph-builder.h.

Referenced by VisitSingleBytecode().

◆ latest_checkpointed_frame_

std::optional<DeoptFrame> v8::internal::maglev::MaglevGraphBuilder::latest_checkpointed_frame_
private

Definition at line 3258 of file maglev-graph-builder.h.

Referenced by GetLatestCheckpointedFrame().

◆ lazy_deopt_result_location_scope_

LazyDeoptResultLocationScope* v8::internal::maglev::MaglevGraphBuilder::lazy_deopt_result_location_scope_ = nullptr
private

◆ local_isolate_

LocalIsolate* const v8::internal::maglev::MaglevGraphBuilder::local_isolate_
private

◆ loop_effects_

LoopEffects* v8::internal::maglev::MaglevGraphBuilder::loop_effects_ = nullptr
private

◆ loop_effects_stack_

ZoneDeque<LoopEffects*> v8::internal::maglev::MaglevGraphBuilder::loop_effects_stack_
private

Definition at line 3244 of file maglev-graph-builder.h.

Referenced by BuildBody(), and MaglevGraphBuilder().

◆ loop_headers_to_peel_

BitVector v8::internal::maglev::MaglevGraphBuilder::loop_headers_to_peel_
private

Definition at line 3252 of file maglev-graph-builder.h.

Referenced by BuildBody(), BuildMergeStates(), and VisitSingleBytecode().

◆ merge_states_

◆ next_handler_table_index_

int v8::internal::maglev::MaglevGraphBuilder::next_handler_table_index_ = 0
private

Definition at line 3293 of file maglev-graph-builder.h.

Referenced by VisitSingleBytecode().

◆ peeled_iteration_count_

int v8::internal::maglev::MaglevGraphBuilder::peeled_iteration_count_ = 0
private

Definition at line 3222 of file maglev-graph-builder.h.

◆ predecessor_count_

uint32_t* v8::internal::maglev::MaglevGraphBuilder::predecessor_count_
private

Definition at line 3220 of file maglev-graph-builder.h.

◆ source_position_iterator_

SourcePositionTableIterator v8::internal::maglev::MaglevGraphBuilder::source_position_iterator_
private

Definition at line 3178 of file maglev-graph-builder.h.

Referenced by BuildBody(), and UpdateSourceAndBytecodePosition().

◆ unobserved_context_slot_stores_

ZoneUnorderedMap<KnownNodeAspects::LoadedContextSlotsKey, Node*> v8::internal::maglev::MaglevGraphBuilder::unobserved_context_slot_stores_
private

Definition at line 3381 of file maglev-graph-builder.h.

Referenced by MaglevGraphBuilder(), and StoreAndCacheContextSlot().


The documentation for this class was generated from the following files: